library(tidyverse)
library(mlbench)
library(ggfortify)
library(GGally)
library(scagnostics)
library(mlr)
We use the Pima Indians Diabetes dataset from the mlbench package.
# Load the Pima Indians Diabetes data set (shipped with mlbench) into the
# workspace and preview its first rows.
data(PimaIndiansDiabetes)
head(PimaIndiansDiabetes)
## pregnant glucose pressure triceps insulin mass pedigree age diabetes
## 1 6 148 72 35 0 33.6 0.627 50 pos
## 2 1 85 66 29 0 26.6 0.351 31 neg
## 3 8 183 64 0 0 23.3 0.672 32 pos
## 4 1 89 66 23 94 28.1 0.167 21 neg
## 5 0 137 40 35 168 43.1 2.288 33 pos
## 6 5 116 74 0 0 25.6 0.201 30 neg
# Colour-blind-friendly palette (with grey) used for all plots below.
cbp1 <- c(
  "#999999", "#E69F00", "#56B4E9", "#009E73",
  "#F0E442", "#0072B2", "#D55E00", "#CC79A7"
)

# Wrap ggplot2::ggplot() so that every plot in this script automatically
# gets the palette and theme_bw(). The manual scales are discrete, so they
# need to be overridden when a continuous colour/fill scale is required.
ggplot <- function(...) {
  ggplot2::ggplot(...) +
    scale_color_manual(values = cbp1) +
    scale_fill_manual(values = cbp1) +
    theme_bw()
}
Exploratory Data Analysis (EDA) is the backbone of data analysis, including those that result in a machine learning model. EDA helps us to understand the data we are working with and put it into context, so that we are able to ask the right questions (or to put our questions into the right frame). It helps us take appropriate measures for cleaning, normalization/transformation, dealing with missing values, feature preparation and engineering, etc. Particularly if our machine learning model is trained on a limited dataset (but not only then!), appropriate data preparation can vastly improve the machine learning process: models will often train faster and achieve higher accuracy.
An essential part of EDA is data visualization.
Typically, we want to start by exploring potential sources of errors in our data, like missing values, duplicate entries, or implausible measurements (such as zeros used as placeholders).
Depending on the number of features/variables we have, it makes sense to look at them all individually and in correlation with each other. Depending on whether we have a categorical or continuous variable, we might be interested in properties that are shown by bar or count plots (for categorical variables) or by histograms, density plots, and boxplots (for continuous variables).
If our target variable is categorical, we will want to look at potential imbalances between the classes. Class imbalance will strongly affect the machine learning modeling process and will require us to consider up-/downsampling or similar techniques before we train a model.
Correlation analysis can show us, for example, which features are strongly correlated with each other (and thus potentially redundant) and which are most strongly associated with the target variable.
Additional methods can be used to visualize groups of related features. These methods are often especially useful if we have a large dataset with a large feature set (highly dimensional data). Some of these methods for visualizing groups of related features and/or for comparing multiple variables and visualizing their relationships are:
# In our dataset, every column except 'diabetes' is a continuous variable.
head(dplyr::select(PimaIndiansDiabetes, where(is.numeric)))
## pregnant glucose pressure triceps insulin mass pedigree age
## 1 6 148 72 35 0 33.6 0.627 50
## 2 1 85 66 29 0 26.6 0.351 31
## 3 8 183 64 0 0 23.3 0.672 32
## 4 1 89 66 23 94 28.1 0.167 21
## 5 0 137 40 35 168 43.1 2.288 33
## 6 5 116 74 0 0 25.6 0.201 30
# 'diabetes' is the only categorical variable; it is also our target
# (dependent) variable.
head(dplyr::select(PimaIndiansDiabetes, !where(is.numeric)))
## diabetes
## 1 pos
## 2 neg
## 3 pos
## 4 neg
## 5 pos
## 6 neg
# Bar plot of the target variable: visualizes the class balance of
# 'diabetes' (neg vs. pos).
ggplot(PimaIndiansDiabetes, aes(x = diabetes, fill = diabetes)) +
  geom_bar(alpha = 0.8) +
  # fill duplicates the x axis, so the legend adds nothing
  theme(legend.position = "none") +
  labs(
    x = "Diabetes outcome",
    y = "count",
    title = "Barplot of categorical features",
    caption = "Source: Pima Indians Diabetes Database"
  )
# Boxplot of the continuous features, split by diabetes outcome.
# pivot_longer() (the replacement for the superseded gather()) reshapes to
# one row per (observation, feature) pair so we can facet by feature.
PimaIndiansDiabetes %>%
  pivot_longer(pregnant:age, names_to = "key", values_to = "value") %>%
  ggplot(aes(x = value, fill = diabetes)) +
  facet_wrap(vars(key), ncol = 3, scales = "free") +
  geom_boxplot(alpha = 0.8) +
  # the y axis carries no information for these horizontal boxplots
  theme(axis.text.y = element_blank(),
        axis.ticks.y = element_blank())
# Histogram of each continuous feature, faceted per feature and filled by
# diabetes outcome. pivot_longer() replaces the superseded gather().
PimaIndiansDiabetes %>%
  pivot_longer(pregnant:age, names_to = "key", values_to = "value") %>%
  ggplot(aes(x = value, fill = diabetes)) +
  facet_wrap(vars(key), ncol = 3, scales = "free") +
  geom_histogram(alpha = 0.8) +
  labs(x = "value of feature in facet",
       y = "count",
       fill = "Diabetes",
       title = "Histogram of features",
       caption = "Source: Pima Indians Diabetes Database")
# Density plot of each continuous feature, faceted per feature and filled
# by diabetes outcome. pivot_longer() replaces the superseded gather().
PimaIndiansDiabetes %>%
  pivot_longer(pregnant:age, names_to = "key", values_to = "value") %>%
  ggplot(aes(x = value, fill = diabetes)) +
  facet_wrap(vars(key), ncol = 3, scales = "free") +
  geom_density(alpha = 0.8) +
  labs(x = "value of feature in facet",
       y = "density",
       fill = "Diabetes",
       title = "Density of continuous features",
       caption = "Source: Pima Indians Diabetes Database")
# Correlation heatmap: pairwise Pearson correlations between the
# continuous features, rounded to two decimals.
mat <- PimaIndiansDiabetes %>%
  dplyr::select(where(is.numeric))
cormat <- round(cor(mat), 2)
# Reshape the correlation matrix to long format (x, y, value) for geom_tile.
# as_tibble() replaces the deprecated as_data_frame(); pivot_longer()
# replaces the superseded gather(). Row names of cor() equal colnames(mat),
# so mutate(x = colnames(mat)) restores the row labels.
cormat <- cormat %>%
  as_tibble() %>%
  mutate(x = colnames(mat)) %>%
  pivot_longer(pregnant:age, names_to = "y", values_to = "value")
cormat %>%
  remove_missing() %>%
  arrange(x, y) %>%
  ggplot(aes(x = x, y = y, fill = value)) +
  geom_tile() +
  # continuous diverging fill overrides the wrapper's discrete scale
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-1, 1), space = "Lab",
                       name = "Pearson\nCorrelation") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
  coord_fixed() +
  labs(x = "feature",
       y = "feature",
       title = "Correlation between features",
       caption = "Source: Pima Indians Diabetes Database")
# Scatterplot matrix of the eight continuous features.
# `alpha` is not a formal argument of ggpairs() and would be silently
# swallowed by `...`; passing it through `mapping` actually applies the
# transparency to the panels.
ggpairs(PimaIndiansDiabetes,
        columns = 1:8,
        mapping = ggplot2::aes(alpha = 0.7)) +
  labs(x = "feature",
       y = "feature",
       title = "Scatterplot matrix",
       caption = "Source: Pima Indians Diabetes Database")
# Principal Component Analysis on the scaled continuous features, plotted
# on the first two components with loading vectors, coloured and shaped by
# diabetes outcome.
prep <- dplyr::select(PimaIndiansDiabetes, where(is.numeric))
pca <- prcomp(prep, scale. = TRUE)
autoplot(pca,
         data = PimaIndiansDiabetes,
         colour = 'diabetes',
         shape = 'diabetes',
         loadings = TRUE,
         loadings.colour = 'blue',
         loadings.label = TRUE,
         loadings.label.size = 3) +
  # autoplot() bypasses the local ggplot() wrapper, so re-apply palette
  # and theme explicitly.
  scale_color_manual(values = cbp1) +
  scale_fill_manual(values = cbp1) +
  theme_bw() +
  labs(title = "Principal Component Analysis (PCA)",
       caption = "Source: Pima Indians Diabetes Database")
# Classical multidimensional scaling (MDS) down to two dimensions.
d <- dist(prep)                        # Euclidean distances between rows
fit <- cmdscale(d, eig = TRUE, k = 2)  # k = number of output dimensions
head(fit$points)
## [,1] [,2]
## 1 -75.71465 -35.950783
## 2 -82.35827 28.908213
## 3 -74.63064 -67.906496
## 4 11.07742 34.898486
## 5 89.74379 -2.746937
## 6 -80.97792 -3.946887
# Sammon mapping: non-linear MDS that preserves small distances better.
# NOTE(review): attaching MASS here masks dplyr::select(); earlier code
# already namespaces dplyr::select() explicitly, and any later select()
# calls must do the same (or this should become MASS::sammon()).
library(MASS)
sam <- sammon(dist(prep))
## Initial stress : 0.03033
## stress after 0 iters: 0.03033
# Stress barely changes after 0 iterations, i.e. the classical MDS start
# configuration was already near-optimal for this data.
sam$points %>%
head()
## [,1] [,2]
## 1 -75.71465 -35.950783
## 2 -82.35827 28.908213
## 3 -74.63064 -67.906496
## 4 11.07742 34.898486
## 5 89.74379 -2.746937
## 6 -80.97792 -3.946887
# Parallel coordinate plot: one polyline per observation across the eight
# continuous features, robustly scaled, features ordered by skewness, and
# grouped (coloured) by column 9 ('diabetes').
ggparcoord(
  data = PimaIndiansDiabetes,
  columns = 1:8,
  groupColumn = 9,
  scale = "robust",
  order = "skewness",
  alpha = 0.7
)
# scagnostics: scatterplot diagnostics — one measure vector per feature pair
scagnostics_dataset <- scagnostics(PimaIndiansDiabetes)
# scagnostics grid: maps each measure column back to its (x, y) feature pair
scagnostics_grid_dataset <- scagnosticsGrid(scagnostics_dataset)
# outliers: logical vector flagging feature pairs whose scagnostic profile
# is unusual compared to the other pairs
scagnostics_o_dataset <- scagnosticsOutliers(scagnostics_dataset)
# self-indexing with the logical vector keeps only the TRUE entries (names
# show which pair was flagged)
scagnostics_o_dataset[scagnostics_o_dataset]
## pregnant * age
## TRUE
# grid rows of the flagged pair(s), for plotting the outlying scatterplot
outlier <- scagnostics_grid_dataset[scagnostics_o_dataset,]
# scagnostics exemplars: representative feature pairs (cluster centers of
# the scagnostics space)
scagnostics_ex_dataset <- scagnosticsExemplars(scagnostics_dataset)
scagnostics_ex_dataset[scagnostics_ex_dataset]
## pregnant * triceps mass * age triceps * diabetes
## TRUE TRUE TRUE
# grid rows of the exemplar pairs
exemplars <- scagnostics_grid_dataset[scagnostics_ex_dataset,]
Machine learning (using the mlr package)
# Reproducible 80/20 train/test split.
# seq_len() is the safe replacement for 1:nrow(...); floor() makes the
# sample size an explicit integer (sample() would otherwise silently
# truncate 0.8 * 768 = 614.4). Same seed + same truncated size = the
# identical draw as before.
set.seed(1000)
n_obs <- nrow(PimaIndiansDiabetes)
train_index <- sample(seq_len(n_obs), size = floor(0.8 * n_obs))
test_index <- setdiff(seq_len(n_obs), train_index)
train <- PimaIndiansDiabetes[train_index, ]
test <- PimaIndiansDiabetes[test_index, ]
# Compare the marginal distributions of both partitions.
list(train = summary(train), test = summary(test))
## $train
## pregnant glucose pressure triceps
## Min. : 0.000 Min. : 0.0 Min. : 0.00 Min. : 0.00
## 1st Qu.: 1.000 1st Qu.:100.0 1st Qu.: 64.00 1st Qu.: 0.00
## Median : 3.000 Median :119.0 Median : 72.00 Median :23.00
## Mean : 3.894 Mean :123.1 Mean : 68.89 Mean :20.66
## 3rd Qu.: 6.000 3rd Qu.:143.0 3rd Qu.: 80.00 3rd Qu.:32.75
## Max. :17.000 Max. :199.0 Max. :114.00 Max. :99.00
## insulin mass pedigree age diabetes
## Min. : 0.00 Min. : 0.00 Min. :0.0780 Min. :21.00 neg:386
## 1st Qu.: 0.00 1st Qu.:27.10 1st Qu.:0.2442 1st Qu.:24.00 pos:228
## Median : 36.50 Median :32.00 Median :0.3780 Median :29.00
## Mean : 81.65 Mean :31.92 Mean :0.4742 Mean :33.42
## 3rd Qu.:131.50 3rd Qu.:36.38 3rd Qu.:0.6355 3rd Qu.:41.00
## Max. :846.00 Max. :59.40 Max. :2.4200 Max. :81.00
##
## $test
## pregnant glucose pressure triceps
## Min. : 0.000 Min. : 0.0 Min. : 0.00 Min. : 0.00
## 1st Qu.: 1.000 1st Qu.: 93.0 1st Qu.: 62.00 1st Qu.: 0.00
## Median : 2.000 Median :108.0 Median : 72.00 Median :23.00
## Mean : 3.649 Mean :112.3 Mean : 69.96 Mean :20.03
## 3rd Qu.: 6.000 3rd Qu.:133.8 3rd Qu.: 79.50 3rd Qu.:32.00
## Max. :14.000 Max. :197.0 Max. :122.00 Max. :56.00
## insulin mass pedigree age diabetes
## Min. : 0.0 Min. : 0.00 Min. :0.0850 Min. :21.00 neg:114
## 1st Qu.: 0.0 1st Qu.:27.80 1st Qu.:0.2395 1st Qu.:23.25 pos: 40
## Median : 20.5 Median :32.40 Median :0.3380 Median :29.00
## Mean : 72.4 Mean :32.29 Mean :0.4627 Mean :32.54
## 3rd Qu.:100.0 3rd Qu.:36.88 3rd Qu.:0.6008 3rd Qu.:39.75
## Max. :744.0 Max. :67.10 Max. :2.3290 Max. :67.00
# List every learner implementation registered with mlr, with its class,
# descriptive name, short name, backing package(s), and implementation notes.
listLearners()
## class
## 1 classif.ada
## 2 classif.adaboostm1
## 3 classif.bartMachine
## 4 classif.binomial
## 5 classif.boosting
## 6 classif.bst
## 7 classif.C50
## 8 classif.cforest
## 9 classif.clusterSVM
## 10 classif.ctree
## 11 classif.cvglmnet
## 12 classif.dbnDNN
## 13 classif.dcSVM
## 14 classif.earth
## 15 classif.evtree
## 16 classif.extraTrees
## 17 classif.fdausc.glm
## 18 classif.fdausc.kernel
## 19 classif.fdausc.knn
## 20 classif.fdausc.np
## 21 classif.FDboost
## 22 classif.featureless
## 23 classif.fgam
## 24 classif.fnn
## 25 classif.gamboost
## 26 classif.gaterSVM
## 27 classif.gausspr
## 28 classif.gbm
## 29 classif.geoDA
## 30 classif.glmboost
## 31 classif.glmnet
## 32 classif.h2o.deeplearning
## 33 classif.h2o.gbm
## 34 classif.h2o.glm
## 35 classif.h2o.randomForest
## 36 classif.IBk
## 37 classif.J48
## 38 classif.JRip
## 39 classif.kknn
## 40 classif.knn
## 41 classif.ksvm
## 42 classif.lda
## 43 classif.LiblineaRL1L2SVC
## 44 classif.LiblineaRL1LogReg
## 45 classif.LiblineaRL2L1SVC
## 46 classif.LiblineaRL2LogReg
## 47 classif.LiblineaRL2SVC
## 48 classif.LiblineaRMultiClassSVC
## 49 classif.linDA
## 50 classif.logreg
## 51 classif.lssvm
## 52 classif.lvq1
## 53 classif.mda
## 54 classif.mlp
## 55 classif.multinom
## 56 classif.naiveBayes
## 57 classif.neuralnet
## 58 classif.nnet
## 59 classif.nnTrain
## 60 classif.nodeHarvest
## 61 classif.OneR
## 62 classif.pamr
## 63 classif.PART
## 64 classif.penalized
## 65 classif.plr
## 66 classif.plsdaCaret
## 67 classif.probit
## 68 classif.qda
## 69 classif.quaDA
## 70 classif.randomForest
## 71 classif.randomForestSRC
## 72 classif.ranger
## 73 classif.rda
## 74 classif.rFerns
## 75 classif.rknn
## 76 classif.rotationForest
## 77 classif.rpart
## 78 classif.RRF
## 79 classif.rrlda
## 80 classif.saeDNN
## 81 classif.sda
## 82 classif.sparseLDA
## 83 classif.svm
## 84 classif.xgboost
## 85 cluster.cmeans
## 86 cluster.Cobweb
## 87 cluster.dbscan
## 88 cluster.EM
## 89 cluster.FarthestFirst
## 90 cluster.kkmeans
## 91 cluster.kmeans
## 92 cluster.MiniBatchKmeans
## 93 cluster.SimpleKMeans
## 94 cluster.XMeans
## 95 multilabel.cforest
## 96 multilabel.randomForestSRC
## 97 multilabel.rFerns
## 98 regr.bartMachine
## 99 regr.bcart
## 100 regr.bgp
## 101 regr.bgpllm
## 102 regr.blm
## 103 regr.brnn
## 104 regr.bst
## 105 regr.btgp
## 106 regr.btgpllm
## 107 regr.btlm
## 108 regr.cforest
## 109 regr.crs
## 110 regr.ctree
## 111 regr.cubist
## 112 regr.cvglmnet
## 113 regr.earth
## 114 regr.evtree
## 115 regr.extraTrees
## 116 regr.FDboost
## 117 regr.featureless
## 118 regr.fgam
## 119 regr.fnn
## 120 regr.frbs
## 121 regr.gamboost
## 122 regr.gausspr
## 123 regr.gbm
## 124 regr.glm
## 125 regr.glmboost
## 126 regr.glmnet
## 127 regr.GPfit
## 128 regr.h2o.deeplearning
## 129 regr.h2o.gbm
## 130 regr.h2o.glm
## 131 regr.h2o.randomForest
## 132 regr.IBk
## 133 regr.kknn
## 134 regr.km
## 135 regr.ksvm
## 136 regr.laGP
## 137 regr.LiblineaRL2L1SVR
## 138 regr.LiblineaRL2L2SVR
## 139 regr.lm
## 140 regr.mars
## 141 regr.mob
## 142 regr.nnet
## 143 regr.nodeHarvest
## 144 regr.pcr
## 145 regr.penalized
## 146 regr.plsr
## 147 regr.randomForest
## 148 regr.randomForestSRC
## 149 regr.ranger
## 150 regr.rknn
## 151 regr.rpart
## 152 regr.RRF
## 153 regr.rsm
## 154 regr.rvm
## 155 regr.svm
## 156 regr.xgboost
## 157 surv.cforest
## 158 surv.coxph
## 159 surv.cvglmnet
## 160 surv.gamboost
## 161 surv.gbm
## 162 surv.glmboost
## 163 surv.glmnet
## 164 surv.randomForestSRC
## 165 surv.ranger
## 166 surv.rpart
## name
## 1 ada Boosting
## 2 ada Boosting M1
## 3 Bayesian Additive Regression Trees
## 4 Binomial Regression
## 5 Adabag Boosting
## 6 Gradient Boosting
## 7 C50
## 8 Random forest based on conditional inference trees
## 9 Clustered Support Vector Machines
## 10 Conditional Inference Trees
## 11 GLM with Lasso or Elasticnet Regularization (Cross Validated Lambda)
## 12 Deep neural network with weights initialized by DBN
## 13 Divided-Conquer Support Vector Machines
## 14 Flexible Discriminant Analysis
## 15 Evolutionary learning of globally optimal trees
## 16 Extremely Randomized Trees
## 17 Generalized Linear Models classification on FDA
## 18 Kernel classification on FDA
## 19 fdausc.knn
## 20 Nonparametric classification on FDA
## 21 Functional linear array classification boosting
## 22 Featureless classifier
## 23 functional general additive model
## 24 Fast k-Nearest Neighbour
## 25 Gradient boosting with smooth components
## 26 Mixture of SVMs with Neural Network Gater Function
## 27 Gaussian Processes
## 28 Gradient Boosting Machine
## 29 Geometric Predictive Discriminant Analysis
## 30 Boosting for GLMs
## 31 GLM with Lasso or Elasticnet Regularization
## 32 h2o.deeplearning
## 33 h2o.gbm
## 34 h2o.glm
## 35 h2o.randomForest
## 36 k-Nearest Neighbours
## 37 J48 Decision Trees
## 38 Propositional Rule Learner
## 39 k-Nearest Neighbor
## 40 k-Nearest Neighbor
## 41 Support Vector Machines
## 42 Linear Discriminant Analysis
## 43 L1-Regularized L2-Loss Support Vector Classification
## 44 L1-Regularized Logistic Regression
## 45 L2-Regularized L1-Loss Support Vector Classification
## 46 L2-Regularized Logistic Regression
## 47 L2-Regularized L2-Loss Support Vector Classification
## 48 Support Vector Classification by Crammer and Singer
## 49 Linear Discriminant Analysis
## 50 Logistic Regression
## 51 Least Squares Support Vector Machine
## 52 Learning Vector Quantization
## 53 Mixture Discriminant Analysis
## 54 Multi-Layer Perceptron
## 55 Multinomial Regression
## 56 Naive Bayes
## 57 Neural Network from neuralnet
## 58 Neural Network
## 59 Training Neural Network by Backpropagation
## 60 Node Harvest
## 61 1-R Classifier
## 62 Nearest shrunken centroid
## 63 PART Decision Lists
## 64 Penalized Logistic Regression
## 65 Logistic Regression with a L2 Penalty
## 66 Partial Least Squares (PLS) Discriminant Analysis
## 67 Probit Regression
## 68 Quadratic Discriminant Analysis
## 69 Quadratic Discriminant Analysis
## 70 Random Forest
## 71 Random Forest
## 72 Random Forests
## 73 Regularized Discriminant Analysis
## 74 Random ferns
## 75 Random k-Nearest-Neighbors
## 76 Rotation Forest
## 77 Decision Tree
## 78 Regularized Random Forests
## 79 Robust Regularized Linear Discriminant Analysis
## 80 Deep neural network with weights initialized by Stacked AutoEncoder
## 81 Shrinkage Discriminant Analysis
## 82 Sparse Discriminant Analysis
## 83 Support Vector Machines (libsvm)
## 84 eXtreme Gradient Boosting
## 85 Fuzzy C-Means Clustering
## 86 Cobweb Clustering Algorithm
## 87 DBScan Clustering
## 88 Expectation-Maximization Clustering
## 89 FarthestFirst Clustering Algorithm
## 90 Kernel K-Means
## 91 K-Means
## 92 MiniBatchKmeans
## 93 K-Means Clustering
## 94 XMeans (k-means with automatic determination of k)
## 95 Random forest based on conditional inference trees
## 96 Random Forest
## 97 Random ferns
## 98 Bayesian Additive Regression Trees
## 99 Bayesian CART
## 100 Bayesian Gaussian Process
## 101 Bayesian Gaussian Process with jumps to the Limiting Linear Model
## 102 Bayesian Linear Model
## 103 Bayesian regularization for feed-forward neural networks
## 104 Gradient Boosting
## 105 Bayesian Treed Gaussian Process
## 106 Bayesian Treed Gaussian Process with jumps to the Limiting Linear Model
## 107 Bayesian Treed Linear Model
## 108 Random Forest Based on Conditional Inference Trees
## 109 Regression Splines
## 110 Conditional Inference Trees
## 111 Cubist
## 112 GLM with Lasso or Elasticnet Regularization (Cross Validated Lambda)
## 113 Multivariate Adaptive Regression Splines
## 114 Evolutionary learning of globally optimal trees
## 115 Extremely Randomized Trees
## 116 Functional linear array regression boosting
## 117 Featureless regression
## 118 functional general additive model
## 119 Fast k-Nearest Neighbor
## 120 Fuzzy Rule-based Systems
## 121 Gradient Boosting with Smooth Components
## 122 Gaussian Processes
## 123 Gradient Boosting Machine
## 124 Generalized Linear Regression
## 125 Boosting for GLMs
## 126 GLM with Lasso or Elasticnet Regularization
## 127 Gaussian Process
## 128 h2o.deeplearning
## 129 h2o.gbm
## 130 h2o.glm
## 131 h2o.randomForest
## 132 K-Nearest Neighbours
## 133 K-Nearest-Neighbor regression
## 134 Kriging
## 135 Support Vector Machines
## 136 Local Approximate Gaussian Process
## 137 L2-Regularized L1-Loss Support Vector Regression
## 138 L2-Regularized L2-Loss Support Vector Regression
## 139 Simple Linear Regression
## 140 Multivariate Adaptive Regression Splines
## 141 Model-based Recursive Partitioning Yielding a Tree with Fitted Models Associated with each Terminal Node
## 142 Neural Network
## 143 Node Harvest
## 144 Principal Component Regression
## 145 Penalized Regression
## 146 Partial Least Squares Regression
## 147 Random Forest
## 148 Random Forest
## 149 Random Forests
## 150 Random k-Nearest-Neighbors
## 151 Decision Tree
## 152 Regularized Random Forests
## 153 Response Surface Regression
## 154 Relevance Vector Machine
## 155 Support Vector Machines (libsvm)
## 156 eXtreme Gradient Boosting
## 157 Random Forest based on Conditional Inference Trees
## 158 Cox Proportional Hazard Model
## 159 GLM with Regularization (Cross Validated Lambda)
## 160 Gradient boosting with smooth components
## 161 Gradient Boosting Machine
## 162 Gradient Boosting with Componentwise Linear Models
## 163 GLM with Regularization
## 164 Random Forest
## 165 Random Forests
## 166 Survival Tree
## short.name package
## 1 ada ada,rpart
## 2 adaboostm1 RWeka
## 3 bartmachine bartMachine
## 4 binomial stats
## 5 adabag adabag,rpart
## 6 bst bst,rpart
## 7 C50 C50
## 8 cforest party
## 9 clusterSVM SwarmSVM,LiblineaR
## 10 ctree party
## 11 cvglmnet glmnet
## 12 dbn.dnn deepnet
## 13 dcSVM SwarmSVM,e1071
## 14 fda earth,stats
## 15 evtree evtree
## 16 extraTrees extraTrees
## 17 fdausc.glm fda.usc
## 18 fdausc.kernel fda.usc
## 19 fdausc.knn fda.usc
## 20 fdausc.np fda.usc
## 21 FDboost FDboost,mboost
## 22 featureless mlr
## 23 FGAM refund
## 24 fnn FNN
## 25 gamboost mboost
## 26 gaterSVM SwarmSVM
## 27 gausspr kernlab
## 28 gbm gbm
## 29 geoda DiscriMiner
## 30 glmboost mboost
## 31 glmnet glmnet
## 32 h2o.dl h2o
## 33 h2o.gbm h2o
## 34 h2o.glm h2o
## 35 h2o.rf h2o
## 36 ibk RWeka
## 37 j48 RWeka
## 38 jrip RWeka
## 39 kknn kknn
## 40 knn class
## 41 ksvm kernlab
## 42 lda MASS
## 43 liblinl1l2svc LiblineaR
## 44 liblinl1logreg LiblineaR
## 45 liblinl2l1svc LiblineaR
## 46 liblinl2logreg LiblineaR
## 47 liblinl2svc LiblineaR
## 48 liblinmulticlasssvc LiblineaR
## 49 linda DiscriMiner
## 50 logreg stats
## 51 lssvm kernlab
## 52 lvq1 class
## 53 mda mda
## 54 mlp RSNNS
## 55 multinom nnet
## 56 nbayes e1071
## 57 neuralnet neuralnet
## 58 nnet nnet
## 59 nn.train deepnet
## 60 nodeHarvest nodeHarvest
## 61 oner RWeka
## 62 pamr pamr
## 63 part RWeka
## 64 penalized penalized
## 65 plr stepPlr
## 66 plsdacaret caret,pls
## 67 probit stats
## 68 qda MASS
## 69 quada DiscriMiner
## 70 rf randomForest
## 71 rfsrc randomForestSRC
## 72 ranger ranger
## 73 rda klaR
## 74 rFerns rFerns
## 75 rknn rknn
## 76 rotationForest rotationForest
## 77 rpart rpart
## 78 RRF RRF
## 79 rrlda rrlda
## 80 sae.dnn deepnet
## 81 sda sda
## 82 sparseLDA sparseLDA,MASS,elasticnet
## 83 svm e1071
## 84 xgboost xgboost
## 85 cmeans e1071,clue
## 86 cobweb RWeka
## 87 dbscan fpc
## 88 em RWeka
## 89 farthestfirst RWeka
## 90 kkmeans kernlab
## 91 kmeans stats,clue
## 92 MBatchKmeans ClusterR
## 93 simplekmeans RWeka
## 94 xmeans RWeka
## 95 cforest party
## 96 rfsrc randomForestSRC
## 97 rFerns rFerns
## 98 bartmachine bartMachine
## 99 bcart tgp
## 100 bgp tgp
## 101 bgpllm tgp
## 102 blm tgp
## 103 brnn brnn
## 104 bst bst,rpart
## 105 btgp tgp
## 106 btgpllm tgp
## 107 btlm tgp
## 108 cforest party
## 109 crs crs
## 110 ctree party
## 111 cubist Cubist
## 112 cvglmnet glmnet
## 113 earth earth
## 114 evtree evtree
## 115 extraTrees extraTrees
## 116 FDboost FDboost,mboost
## 117 featureless mlr
## 118 FGAM refund
## 119 fnn FNN
## 120 frbs frbs
## 121 gamboost mboost
## 122 gausspr kernlab
## 123 gbm gbm
## 124 glm stats
## 125 glmboost mboost
## 126 glmnet glmnet
## 127 GPfit GPfit
## 128 h2o.dl h2o
## 129 h2o.gbm h2o
## 130 h2o.glm h2o
## 131 h2o.rf h2o
## 132 ibk RWeka
## 133 kknn kknn
## 134 km DiceKriging
## 135 ksvm kernlab
## 136 laGP laGP
## 137 liblinl2l1svr LiblineaR
## 138 liblinl2l2svr LiblineaR
## 139 lm stats
## 140 mars mda
## 141 mob party,modeltools
## 142 nnet nnet
## 143 nodeHarvest nodeHarvest
## 144 pcr pls
## 145 penalized penalized
## 146 plsr pls
## 147 rf randomForest
## 148 rfsrc randomForestSRC
## 149 ranger ranger
## 150 rknn rknn
## 151 rpart rpart
## 152 RRF RRF
## 153 rsm rsm
## 154 rvm kernlab
## 155 svm e1071
## 156 xgboost xgboost
## 157 crf party,survival
## 158 coxph survival
## 159 cvglmnet glmnet
## 160 gamboost survival,mboost
## 161 gbm gbm
## 162 glmboost survival,mboost
## 163 glmnet glmnet
## 164 rfsrc survival,randomForestSRC
## 165 ranger ranger
## 166 rpart rpart
## note
## 1 `xval` has been set to `0` by default for speed.
## 2 NAs are directly passed to WEKA with `na.action = na.pass`.
## 3 `use_missing_data` has been set to `TRUE` by default to allow missing data support.
## 4 Delegates to `glm` with freely choosable binomial link function via learner parameter `link`. We set 'model' to FALSE by default to save memory.
## 5 `xval` has been set to `0` by default for speed.
## 6 Renamed parameter `learner` to `Learner` due to nameclash with `setHyperPars`. Default changes: `Learner = "ls"`, `xval = 0`, and `maxdepth = 1`.
## 7
## 8 See `?ctree_control` for possible breakage for nominal features with missingness.
## 9 `centers` set to `2` by default.
## 10 See `?ctree_control` for possible breakage for nominal features with missingness.
## 11 The family parameter is set to `binomial` for two-class problems and to `multinomial` otherwise. Factors automatically get converted to dummy columns, ordered factors to integer.\n glmnet uses a global control object for its parameters. mlr resets all control parameters to their defaults\n before setting the specified parameters and after training.\n If you are setting glmnet.control parameters through glmnet.control,\n you need to save and re-set them after running the glmnet learner.
## 12 `output` set to `"softmax"` by default.
## 13
## 14 This learner performs flexible discriminant analysis using the earth algorithm. na.action is set to na.fail and only this is supported.
## 15 `pmutatemajor`, `pmutateminor`, `pcrossover`, `psplit`, and `pprune`,\n are scaled internally to sum to 100.
## 16
## 17 model$C[[1]] is set to quote(classif.glm)
## 18 Argument draw=FALSE is used as default.
## 19 Argument draw=FALSE is used as default.
## 20 Argument draw=FALSE is used as default. Additionally, mod$C[[1]] is set to quote(classif.np)
## 21 Uses only one base learner per functional or scalar covariate.\n Uses the same hyperparameters for every baselearner.\n Currently does not support interaction between scalar covariates.\n Default for family has been set to 'Binomial', as 'Gaussian' is not applicable.
## 22
## 23
## 24
## 25 `family` has been set to `Binomial()` by default. For 'family' 'AUC' and 'AdaExp' probabilities cannot be predicted.
## 26 `m` set to `3` and `max.iter` set to `1` by default.
## 27 Kernel parameters have to be passed directly and not by using the `kpar` list in `gausspr`.\n Note that `fit` has been set to `FALSE` by default for speed.
## 28 `keep.data` is set to FALSE to reduce memory requirements.\nParam 'n.cores' has been to set to '1' by default to suppress parallelization by the package.
## 29
## 30 `family` has been set to `Binomial` by default. For 'family' 'AUC' and 'AdaExp' probabilities cannot be predcited.
## 31 The family parameter is set to `binomial` for two-class problems and to `multinomial` otherwise.\n Factors automatically get converted to dummy columns, ordered factors to integer.\n Parameter `s` (value of the regularization parameter used for predictions) is set to `0.01` by default,\n but needs to be tuned by the user.\n glmnet uses a global control object for its parameters. mlr resets all control parameters to their defaults\n before setting the specified parameters and after training.\n If you are setting glmnet.control parameters through glmnet.control,\n you need to save and re-set them after running the glmnet learner.
## 32 The default value of `missing_values_handling` is `"MeanImputation"`, so missing values are automatically mean-imputed.
## 33 'distribution' is set automatically to 'gaussian'.
## 34 `family` is always set to `"binomial"` to get a binary classifier. The default value of `missing_values_handling` is `"MeanImputation"`, so missing values are automatically mean-imputed.
## 35
## 36
## 37 NAs are directly passed to WEKA with `na.action = na.pass`.
## 38 NAs are directly passed to WEKA with `na.action = na.pass`.
## 39
## 40
## 41 Kernel parameters have to be passed directly and not by using the `kpar` list in `ksvm`. Note that `fit` has been set to `FALSE` by default for speed.
## 42 Learner parameter `predict.method` maps to `method` in `predict.lda`.
## 43
## 44
## 45
## 46 `type = 0` (the default) is primal and `type = 7` is dual problem.
## 47 `type = 2` (the default) is primal and `type = 1` is dual problem.
## 48
## 49 Set `validation = NULL` by default to disable internal test set validation.
## 50 Delegates to `glm` with `family = binomial(link = 'logit')`. We set 'model' to FALSE by default to save memory.
## 51 `fitted` has been set to `FALSE` by default for speed.
## 52
## 53 `keep.fitted` has been set to `FALSE` by default for speed and we use `start.method = "lvq"` for more robust behavior / less technical crashes.
## 54
## 55
## 56
## 57 `err.fct` has been set to `ce` and `linear.output` to FALSE to do classification.
## 58 `linout=TRUE` is hardcoded for regression. `size` has been set to `3` by default.
## 59 `output` set to `softmax` by default. `max.number.of.layers` can be set to control and tune the maximal number of layers specified via `hidden`.
## 60
## 61 NAs are directly passed to WEKA with `na.action = na.pass`.
## 62 Threshold for prediction (`threshold.predict`) has been set to `1` by default.
## 63 NAs are directly passed to WEKA with `na.action = na.pass`.
## 64 trace=FALSE was set by default to disable logging output.
## 65 AIC and BIC penalty types can be selected via the new parameter `cp.type`.
## 66
## 67 Delegates to `glm` with `family = binomial(link = 'probit')`. We set 'model' to FALSE by default to save memory.
## 68 Learner parameter `predict.method` maps to `method` in `predict.qda`.
## 69
## 70 Note that the rf can freeze the R process if trained on a task with 1 feature which is constant. This can happen in feature forward selection, also due to resampling, and you need to remove such features with removeConstantFeatures.
## 71 `na.action` has been set to `"na.impute"` by default to allow missing data support.
## 72 By default, internal parallelization is switched off (`num.threads = 1`), `verbose` output is disabled, `respect.unordered.factors` is set to `order` for all splitrules. If predict.type='prob' we set 'probability=TRUE' in ranger.
## 73 `estimate.error` has been set to `FALSE` by default for speed.
## 74
## 75 k restricted to < 99 as the code allocates arrays of static size
## 76
## 77 `xval` has been set to `0` by default for speed.
## 78
## 79
## 80 `output` set to `"softmax"` by default.
## 81
## 82 Arguments `Q` and `stop` are not yet provided as they depend on the task.
## 83
## 84 All settings are passed directly, rather than through `xgboost`'s `params` argument. `nrounds` has been set to `1` and `verbose` to `0` by default. `num_class` is set internally, so do not set this manually.
## 85 The `predict` method uses `cl_predict` from the `clue` package to compute the cluster memberships for new data. The default `centers = 2` is added so the method runs without setting parameters, but this must in reality of course be changed by the user.
## 86
## 87 A cluster index of NA indicates noise points. Specify `method = 'dist'` if the data should be interpreted as dissimilarity matrix or object. Otherwise Euclidean distances will be used.
## 88
## 89
## 90 `centers` has been set to `2L` by default. The nearest center in kernel distance determines cluster assignment of new data points. Kernel parameters have to be passed directly and not by using the `kpar` list in `kkmeans`
## 91 The `predict` method uses `cl_predict` from the `clue` package to compute the cluster memberships for new data. The default `centers = 2` is added so the method runs without setting parameters, but this must in reality of course be changed by the user.
## 92 Calls MiniBatchKmeans of package ClusterR. Argument `clusters` has default value of 2 if not provided by user.
## 93
## 94 You may have to install the XMeans Weka package: `WPM('install-package', 'XMeans')`.
## 95
## 96 `na.action` has been set to `na.impute` by default to allow missing data support.
## 97
## 98 `use_missing_data` has been set to `TRUE` by default to allow missing data support.
## 99
## 100
## 101
## 102
## 103
## 104 Renamed parameter `learner` to `Learner` due to nameclash with `setHyperPars`. Default changes: `Learner = "ls"`, `xval = 0`, and `maxdepth = 1`.
## 105
## 106
## 107
## 108 See `?ctree_control` for possible breakage for nominal features with missingness.
## 109
## 110 See `?ctree_control` for possible breakage for nominal features with missingness.
## 111
## 112 Factors automatically get converted to dummy columns, ordered factors to integer.\n glmnet uses a global control object for its parameters. mlr resets all control parameters to their defaults\n before setting the specified parameters and after training.\n If you are setting glmnet.control parameters through glmnet.control,\n you need to save and re-set them after running the glmnet learner.
## 113
## 114 `pmutatemajor`, `pmutateminor`, `pcrossover`, `psplit`, and `pprune`,\n are scaled internally to sum to 100.
## 115
## 116 Only allow one base learner for functional covariate and one base learner for scalar covariate, the parameters for these base learners are the same. Also we currently do not support interaction between scalar covariates
## 117
## 118
## 119
## 120
## 121
## 122 Kernel parameters have to be passed directly and not by using the `kpar` list in `gausspr`.\n Note that `fit` has been set to `FALSE` by default for speed.
## 123 `keep.data` is set to FALSE to reduce memory requirements, `distribution` has been set to `"gaussian"` by default.Param 'n.cores' has been to set to '1' by default to suppress parallelization by the package.
## 124 'family' must be a character and every family has its own link, i.e. family = 'gaussian', link.gaussian = 'identity', which is also the default. We set 'model' to FALSE by default to save memory.
## 125
## 126 Factors automatically get converted to dummy columns, ordered factors to integer.\n Parameter `s` (value of the regularization parameter used for predictions) is set to `0.01` by default,\n but needs to be tuned by the user.\n glmnet uses a global control object for its parameters. mlr resets all control parameters to their defaults\n before setting the specified parameters and after training.\n If you are setting glmnet.control parameters through glmnet.control,\n you need to save and re-set them after running the glmnet learner.
## 127 (1) As the optimization routine assumes that the inputs are scaled to the unit hypercube [0,1]^d,\n the input gets scaled for each variable by default. If this is not wanted, scale = FALSE has\n to be set. (2) We replace the GPfit parameter 'corr = list(type = 'exponential',power = 1.95)' to be seperate\n parameters 'type' and 'power', in the case of corr = list(type = 'matern', nu = 0.5), the seperate parameters\n are 'type' and 'matern_nu_k = 0', and nu is computed by 'nu = (2 * matern_nu_k + 1) / 2 = 0.5'\n
## 128 The default value of `missing_values_handling` is `"MeanImputation"`, so missing values are automatically mean-imputed.
## 129
## 130 `family` is always set to `"gaussian"`. The default value of `missing_values_handling` is `"MeanImputation"`, so missing values are automatically mean-imputed.
## 131
## 132
## 133
## 134 In predict, we currently always use `type = "SK"`. The extra parameter `jitter` (default is `FALSE`) enables adding a very small jitter (order 1e-12) to the x-values before prediction, as `predict.km` reproduces the exact y-values of the training data points, when you pass them in, even if the nugget effect is turned on. \n We further introduced `nugget.stability` which sets the `nugget` to `nugget.stability * var(y)` before each training to improve numerical stability. We recommend a setting of 10^-8
## 135 Kernel parameters have to be passed directly and not by using the `kpar` list in `ksvm`. Note that `fit` has been set to `FALSE` by default for speed.
## 136
## 137 Parameter `svr_eps` has been set to `0.1` by default.
## 138 `type = 11` (the default) is primal and `type = 12` is dual problem. Parameter `svr_eps` has been set to `0.1` by default.
## 139
## 140
## 141
## 142 `size` has been set to `3` by default.
## 143
## 144
## 145 trace=FALSE was set by default to disable logging output.
## 146
## 147 See the section about 'regr.randomForest' in `?makeLearner` for information about se estimation. Note that the rf can freeze the R process if trained on a task with 1 feature which is constant. This can happen in feature forward selection, also due to resampling, and you need to remove such features with removeConstantFeatures. keep.inbag is NULL by default but if predict.type = 'se' and se.method = 'jackknife' (the default) then it is automatically set to TRUE.
## 148 `na.action` has been set to `"na.impute"` by default to allow missing data support.
## 149 By default, internal parallelization is switched off (`num.threads = 1`), `verbose` output is disabled, `respect.unordered.factors` is set to `order` for all splitrules. All settings are changeable. `mtry.perc` sets `mtry` to `mtry.perc*getTaskNFeats(.task)`. Default for `mtry` is the floor of square root of number of features in task. SE estimation is mc bias-corrected jackknife after bootstrap, see the section about 'regr.randomForest' in `?makeLearner` for more details.
## 150
## 151 `xval` has been set to `0` by default for speed.
## 152
## 153 You select the order of the regression by using `modelfun = "FO"` (first order), `"TWI"` (two-way interactions, this is with 1st oder terms!) and `"SO"` (full second order).
## 154 Kernel parameters have to be passed directly and not by using the `kpar` list in `rvm`. Note that `fit` has been set to `FALSE` by default for speed.
## 155
## 156 All settings are passed directly, rather than through `xgboost`'s `params` argument. `nrounds` has been set to `1` and `verbose` to `0` by default.
## 157 See `?ctree_control` for possible breakage for nominal features with missingness.
## 158
## 159 Factors automatically get converted to dummy columns, ordered factors to integer.
## 160 `family` has been set to `CoxPH()` by default.
## 161 `keep.data` is set to FALSE to reduce memory requirements.
## 162 `family` has been set to `CoxPH()` by default.
## 163 Factors automatically get converted to dummy columns, ordered factors to integer.Parameter `s` (value of the regularization parameter used for predictions) is set to `0.1` by default, but needs to be tuned by the user. glmnet uses a global control object for its parameters. mlr resets all control parameters to their defaults before setting the specified parametersand after training. If you are setting glmnet.control parameters through glmnet.control,you need to save and re-set them after running the glmnet learner.
## 164 `na.action` has been set to `"na.impute"` by default to allow missing data support.
## 165 By default, internal parallelization is switched off (`num.threads = 1`), `verbose` output is disabled, `respect.unordered.factors` is set to `order` for all splitrules. All settings are changeable.
## 166 `xval` has been set to `0` by default for speed.
## type installed numerics factors ordered missings weights prob
## 1 classif FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 2 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 3 classif FALSE TRUE TRUE FALSE TRUE FALSE TRUE
## 4 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 5 classif FALSE TRUE TRUE FALSE TRUE FALSE TRUE
## 6 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 7 classif FALSE TRUE TRUE FALSE TRUE TRUE TRUE
## 8 classif FALSE TRUE TRUE TRUE TRUE TRUE TRUE
## 9 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 10 classif FALSE TRUE TRUE TRUE TRUE TRUE TRUE
## 11 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 12 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 13 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 14 classif FALSE TRUE TRUE FALSE FALSE TRUE TRUE
## 15 classif FALSE TRUE TRUE TRUE FALSE TRUE TRUE
## 16 classif FALSE TRUE FALSE FALSE FALSE TRUE TRUE
## 17 classif FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 18 classif FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 19 classif FALSE FALSE FALSE FALSE FALSE TRUE TRUE
## 20 classif FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 21 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 22 classif TRUE TRUE TRUE TRUE TRUE FALSE TRUE
## 23 classif FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 24 classif TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 25 classif FALSE TRUE TRUE FALSE FALSE TRUE TRUE
## 26 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 27 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 28 classif TRUE TRUE TRUE FALSE TRUE TRUE TRUE
## 29 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 30 classif FALSE TRUE TRUE FALSE FALSE TRUE TRUE
## 31 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 32 classif TRUE TRUE TRUE FALSE TRUE TRUE TRUE
## 33 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 34 classif TRUE TRUE TRUE FALSE TRUE TRUE TRUE
## 35 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 36 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 37 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 38 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 39 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 40 classif TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 41 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 42 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 43 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 44 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 45 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 46 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 47 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 48 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 49 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 50 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 51 classif TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 52 classif TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 53 classif FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 54 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 55 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 56 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 57 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 58 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 59 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 60 classif FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 61 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 62 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 63 classif TRUE TRUE TRUE FALSE TRUE FALSE TRUE
## 64 classif FALSE TRUE TRUE TRUE FALSE FALSE TRUE
## 65 classif FALSE TRUE TRUE FALSE FALSE TRUE TRUE
## 66 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 67 classif TRUE TRUE TRUE FALSE FALSE TRUE TRUE
## 68 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 69 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 70 classif TRUE TRUE TRUE TRUE FALSE FALSE TRUE
## 71 classif FALSE TRUE TRUE TRUE TRUE TRUE TRUE
## 72 classif TRUE TRUE TRUE TRUE FALSE TRUE TRUE
## 73 classif FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 74 classif FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 75 classif FALSE TRUE FALSE TRUE FALSE FALSE FALSE
## 76 classif FALSE TRUE TRUE TRUE FALSE FALSE TRUE
## 77 classif TRUE TRUE TRUE TRUE TRUE TRUE TRUE
## 78 classif FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 79 classif FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 80 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 81 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 82 classif FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 83 classif TRUE TRUE TRUE FALSE FALSE FALSE TRUE
## 84 classif TRUE TRUE FALSE FALSE TRUE TRUE TRUE
## 85 cluster FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 86 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 87 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 88 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 89 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 90 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 91 cluster FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 92 cluster FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 93 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 94 cluster TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 95 multilabel FALSE TRUE TRUE TRUE TRUE TRUE TRUE
## 96 multilabel FALSE TRUE TRUE FALSE TRUE TRUE TRUE
## 97 multilabel FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 98 regr FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 99 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 100 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 101 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 102 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 103 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 104 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 105 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 106 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 107 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 108 regr FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 109 regr FALSE TRUE TRUE FALSE FALSE TRUE FALSE
## 110 regr FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 111 regr FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 112 regr TRUE TRUE TRUE FALSE FALSE TRUE FALSE
## 113 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 114 regr FALSE TRUE TRUE TRUE FALSE TRUE FALSE
## 115 regr FALSE TRUE FALSE FALSE FALSE TRUE FALSE
## 116 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 117 regr TRUE TRUE TRUE TRUE TRUE FALSE FALSE
## 118 regr FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 119 regr TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 120 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 121 regr FALSE TRUE TRUE FALSE FALSE TRUE FALSE
## 122 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 123 regr TRUE TRUE TRUE FALSE TRUE TRUE FALSE
## 124 regr TRUE TRUE TRUE FALSE FALSE TRUE FALSE
## 125 regr FALSE TRUE TRUE FALSE FALSE TRUE FALSE
## 126 regr TRUE TRUE TRUE TRUE FALSE TRUE FALSE
## 127 regr TRUE TRUE FALSE FALSE FALSE FALSE FALSE
## 128 regr TRUE TRUE TRUE FALSE TRUE TRUE FALSE
## 129 regr TRUE TRUE TRUE FALSE TRUE FALSE FALSE
## 130 regr TRUE TRUE TRUE FALSE TRUE TRUE FALSE
## 131 regr TRUE TRUE TRUE FALSE TRUE FALSE FALSE
## 132 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 133 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 134 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 135 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 136 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 137 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 138 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 139 regr TRUE TRUE TRUE FALSE FALSE TRUE FALSE
## 140 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 141 regr FALSE TRUE TRUE FALSE FALSE TRUE FALSE
## 142 regr TRUE TRUE TRUE FALSE FALSE TRUE FALSE
## 143 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 144 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 145 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 146 regr FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 147 regr TRUE TRUE TRUE TRUE FALSE FALSE FALSE
## 148 regr FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 149 regr TRUE TRUE TRUE TRUE FALSE TRUE FALSE
## 150 regr FALSE TRUE FALSE TRUE FALSE FALSE FALSE
## 151 regr TRUE TRUE TRUE TRUE TRUE TRUE FALSE
## 152 regr FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 153 regr FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 154 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 155 regr TRUE TRUE TRUE FALSE FALSE FALSE FALSE
## 156 regr TRUE TRUE FALSE FALSE TRUE TRUE FALSE
## 157 surv FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 158 surv TRUE TRUE TRUE FALSE FALSE TRUE FALSE
## 159 surv TRUE TRUE TRUE TRUE FALSE TRUE FALSE
## 160 surv FALSE TRUE TRUE TRUE FALSE TRUE FALSE
## 161 surv TRUE TRUE TRUE FALSE TRUE TRUE FALSE
## 162 surv FALSE TRUE TRUE TRUE FALSE TRUE FALSE
## 163 surv TRUE TRUE TRUE TRUE FALSE TRUE FALSE
## 164 surv FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 165 surv TRUE TRUE TRUE TRUE FALSE TRUE FALSE
## 166 surv TRUE TRUE TRUE TRUE TRUE TRUE FALSE
## oneclass twoclass multiclass class.weights featimp oobpreds functionals
## 1 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 2 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 3 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 4 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 5 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 6 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 7 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 8 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 9 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 10 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 11 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 12 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 13 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 14 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 15 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 16 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 17 FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 18 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 19 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 20 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 21 FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 22 FALSE TRUE TRUE FALSE FALSE FALSE TRUE
## 23 FALSE TRUE FALSE FALSE FALSE FALSE TRUE
## 24 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 25 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 26 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 27 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 28 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 29 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 30 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 31 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 32 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 33 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 34 FALSE TRUE FALSE FALSE TRUE FALSE FALSE
## 35 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 36 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 37 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 38 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 39 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 40 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 41 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 42 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 43 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 44 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 45 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 46 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 47 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 48 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 49 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 50 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 51 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 52 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 53 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 54 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 55 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 56 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 57 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 58 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 59 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 60 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 61 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 62 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 63 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 64 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 65 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 66 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 67 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 68 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 69 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 70 FALSE TRUE TRUE TRUE TRUE TRUE FALSE
## 71 FALSE TRUE TRUE FALSE TRUE TRUE FALSE
## 72 FALSE TRUE TRUE FALSE TRUE TRUE FALSE
## 73 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 74 FALSE TRUE TRUE FALSE FALSE TRUE FALSE
## 75 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 76 FALSE TRUE FALSE FALSE FALSE FALSE FALSE
## 77 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 78 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 79 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 80 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 81 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 82 FALSE TRUE TRUE FALSE FALSE FALSE FALSE
## 83 FALSE TRUE TRUE TRUE FALSE FALSE FALSE
## 84 FALSE TRUE TRUE FALSE TRUE FALSE FALSE
## 85 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 86 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 87 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 88 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 89 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 90 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 91 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 92 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 93 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 94 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 95 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 96 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 97 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 98 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 99 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 100 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 101 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 102 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 103 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 104 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 105 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 106 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 107 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 108 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 109 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 110 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 111 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 112 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 113 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 114 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 115 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 116 FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 117 FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 118 FALSE FALSE FALSE FALSE FALSE FALSE TRUE
## 119 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 120 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 121 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 122 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 123 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 124 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 125 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 126 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 127 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 128 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 129 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 130 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 131 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 132 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 133 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 134 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 135 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 136 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 137 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 138 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 139 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 140 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 141 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 142 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 143 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 144 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 145 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 146 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 147 FALSE FALSE FALSE FALSE TRUE TRUE FALSE
## 148 FALSE FALSE FALSE FALSE TRUE TRUE FALSE
## 149 FALSE FALSE FALSE FALSE TRUE TRUE FALSE
## 150 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 151 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 152 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 153 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 154 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 155 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 156 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 157 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 158 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 159 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 160 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 161 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 162 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 163 FALSE FALSE FALSE FALSE FALSE FALSE FALSE
## 164 FALSE FALSE FALSE FALSE TRUE TRUE FALSE
## 165 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## 166 FALSE FALSE FALSE FALSE TRUE FALSE FALSE
## single.functional se lcens rcens icens
## 1 FALSE FALSE FALSE FALSE FALSE
## 2 FALSE FALSE FALSE FALSE FALSE
## 3 FALSE FALSE FALSE FALSE FALSE
## 4 FALSE FALSE FALSE FALSE FALSE
## 5 FALSE FALSE FALSE FALSE FALSE
## 6 FALSE FALSE FALSE FALSE FALSE
## 7 FALSE FALSE FALSE FALSE FALSE
## 8 FALSE FALSE FALSE FALSE FALSE
## 9 FALSE FALSE FALSE FALSE FALSE
## 10 FALSE FALSE FALSE FALSE FALSE
## 11 FALSE FALSE FALSE FALSE FALSE
## 12 FALSE FALSE FALSE FALSE FALSE
## 13 FALSE FALSE FALSE FALSE FALSE
## 14 FALSE FALSE FALSE FALSE FALSE
## 15 FALSE FALSE FALSE FALSE FALSE
## 16 FALSE FALSE FALSE FALSE FALSE
## 17 FALSE FALSE FALSE FALSE FALSE
## 18 TRUE FALSE FALSE FALSE FALSE
## 19 TRUE FALSE FALSE FALSE FALSE
## 20 TRUE FALSE FALSE FALSE FALSE
## 21 FALSE FALSE FALSE FALSE FALSE
## 22 FALSE FALSE FALSE FALSE FALSE
## 23 TRUE FALSE FALSE FALSE FALSE
## 24 FALSE FALSE FALSE FALSE FALSE
## 25 FALSE FALSE FALSE FALSE FALSE
## 26 FALSE FALSE FALSE FALSE FALSE
## 27 FALSE FALSE FALSE FALSE FALSE
## 28 FALSE FALSE FALSE FALSE FALSE
## 29 FALSE FALSE FALSE FALSE FALSE
## 30 FALSE FALSE FALSE FALSE FALSE
## 31 FALSE FALSE FALSE FALSE FALSE
## 32 FALSE FALSE FALSE FALSE FALSE
## 33 FALSE FALSE FALSE FALSE FALSE
## 34 FALSE FALSE FALSE FALSE FALSE
## 35 FALSE FALSE FALSE FALSE FALSE
## 36 FALSE FALSE FALSE FALSE FALSE
## 37 FALSE FALSE FALSE FALSE FALSE
## 38 FALSE FALSE FALSE FALSE FALSE
## 39 FALSE FALSE FALSE FALSE FALSE
## 40 FALSE FALSE FALSE FALSE FALSE
## 41 FALSE FALSE FALSE FALSE FALSE
## 42 FALSE FALSE FALSE FALSE FALSE
## 43 FALSE FALSE FALSE FALSE FALSE
## 44 FALSE FALSE FALSE FALSE FALSE
## 45 FALSE FALSE FALSE FALSE FALSE
## 46 FALSE FALSE FALSE FALSE FALSE
## 47 FALSE FALSE FALSE FALSE FALSE
## 48 FALSE FALSE FALSE FALSE FALSE
## 49 FALSE FALSE FALSE FALSE FALSE
## 50 FALSE FALSE FALSE FALSE FALSE
## 51 FALSE FALSE FALSE FALSE FALSE
## 52 FALSE FALSE FALSE FALSE FALSE
## 53 FALSE FALSE FALSE FALSE FALSE
## 54 FALSE FALSE FALSE FALSE FALSE
## 55 FALSE FALSE FALSE FALSE FALSE
## 56 FALSE FALSE FALSE FALSE FALSE
## 57 FALSE FALSE FALSE FALSE FALSE
## 58 FALSE FALSE FALSE FALSE FALSE
## 59 FALSE FALSE FALSE FALSE FALSE
## 60 FALSE FALSE FALSE FALSE FALSE
## 61 FALSE FALSE FALSE FALSE FALSE
## 62 FALSE FALSE FALSE FALSE FALSE
## 63 FALSE FALSE FALSE FALSE FALSE
## 64 FALSE FALSE FALSE FALSE FALSE
## 65 FALSE FALSE FALSE FALSE FALSE
## 66 FALSE FALSE FALSE FALSE FALSE
## 67 FALSE FALSE FALSE FALSE FALSE
## 68 FALSE FALSE FALSE FALSE FALSE
## 69 FALSE FALSE FALSE FALSE FALSE
## 70 FALSE FALSE FALSE FALSE FALSE
## 71 FALSE FALSE FALSE FALSE FALSE
## 72 FALSE FALSE FALSE FALSE FALSE
## 73 FALSE FALSE FALSE FALSE FALSE
## 74 FALSE FALSE FALSE FALSE FALSE
## 75 FALSE FALSE FALSE FALSE FALSE
## 76 FALSE FALSE FALSE FALSE FALSE
## 77 FALSE FALSE FALSE FALSE FALSE
## 78 FALSE FALSE FALSE FALSE FALSE
## 79 FALSE FALSE FALSE FALSE FALSE
## 80 FALSE FALSE FALSE FALSE FALSE
## 81 FALSE FALSE FALSE FALSE FALSE
## 82 FALSE FALSE FALSE FALSE FALSE
## 83 FALSE FALSE FALSE FALSE FALSE
## 84 FALSE FALSE FALSE FALSE FALSE
## 85 FALSE FALSE FALSE FALSE FALSE
## 86 FALSE FALSE FALSE FALSE FALSE
## 87 FALSE FALSE FALSE FALSE FALSE
## 88 FALSE FALSE FALSE FALSE FALSE
## 89 FALSE FALSE FALSE FALSE FALSE
## 90 FALSE FALSE FALSE FALSE FALSE
## 91 FALSE FALSE FALSE FALSE FALSE
## 92 FALSE FALSE FALSE FALSE FALSE
## 93 FALSE FALSE FALSE FALSE FALSE
## 94 FALSE FALSE FALSE FALSE FALSE
## 95 FALSE FALSE FALSE FALSE FALSE
## 96 FALSE FALSE FALSE FALSE FALSE
## 97 FALSE FALSE FALSE FALSE FALSE
## 98 FALSE FALSE FALSE FALSE FALSE
## 99 FALSE TRUE FALSE FALSE FALSE
## 100 FALSE TRUE FALSE FALSE FALSE
## 101 FALSE TRUE FALSE FALSE FALSE
## 102 FALSE TRUE FALSE FALSE FALSE
## 103 FALSE FALSE FALSE FALSE FALSE
## 104 FALSE FALSE FALSE FALSE FALSE
## 105 FALSE TRUE FALSE FALSE FALSE
## 106 FALSE TRUE FALSE FALSE FALSE
## 107 FALSE TRUE FALSE FALSE FALSE
## 108 FALSE FALSE FALSE FALSE FALSE
## 109 FALSE TRUE FALSE FALSE FALSE
## 110 FALSE FALSE FALSE FALSE FALSE
## 111 FALSE FALSE FALSE FALSE FALSE
## 112 FALSE FALSE FALSE FALSE FALSE
## 113 FALSE FALSE FALSE FALSE FALSE
## 114 FALSE FALSE FALSE FALSE FALSE
## 115 FALSE FALSE FALSE FALSE FALSE
## 116 FALSE FALSE FALSE FALSE FALSE
## 117 FALSE FALSE FALSE FALSE FALSE
## 118 TRUE FALSE FALSE FALSE FALSE
## 119 FALSE FALSE FALSE FALSE FALSE
## 120 FALSE FALSE FALSE FALSE FALSE
## 121 FALSE FALSE FALSE FALSE FALSE
## 122 FALSE TRUE FALSE FALSE FALSE
## 123 FALSE FALSE FALSE FALSE FALSE
## 124 FALSE TRUE FALSE FALSE FALSE
## 125 FALSE FALSE FALSE FALSE FALSE
## 126 FALSE FALSE FALSE FALSE FALSE
## 127 FALSE TRUE FALSE FALSE FALSE
## 128 FALSE FALSE FALSE FALSE FALSE
## 129 FALSE FALSE FALSE FALSE FALSE
## 130 FALSE FALSE FALSE FALSE FALSE
## 131 FALSE FALSE FALSE FALSE FALSE
## 132 FALSE FALSE FALSE FALSE FALSE
## 133 FALSE FALSE FALSE FALSE FALSE
## 134 FALSE TRUE FALSE FALSE FALSE
## 135 FALSE FALSE FALSE FALSE FALSE
## 136 FALSE TRUE FALSE FALSE FALSE
## 137 FALSE FALSE FALSE FALSE FALSE
## 138 FALSE FALSE FALSE FALSE FALSE
## 139 FALSE TRUE FALSE FALSE FALSE
## 140 FALSE FALSE FALSE FALSE FALSE
## 141 FALSE FALSE FALSE FALSE FALSE
## 142 FALSE FALSE FALSE FALSE FALSE
## 143 FALSE FALSE FALSE FALSE FALSE
## 144 FALSE FALSE FALSE FALSE FALSE
## 145 FALSE FALSE FALSE FALSE FALSE
## 146 FALSE FALSE FALSE FALSE FALSE
## 147 FALSE TRUE FALSE FALSE FALSE
## 148 FALSE FALSE FALSE FALSE FALSE
## 149 FALSE TRUE FALSE FALSE FALSE
## 150 FALSE FALSE FALSE FALSE FALSE
## 151 FALSE FALSE FALSE FALSE FALSE
## 152 FALSE FALSE FALSE FALSE FALSE
## 153 FALSE FALSE FALSE FALSE FALSE
## 154 FALSE FALSE FALSE FALSE FALSE
## 155 FALSE FALSE FALSE FALSE FALSE
## 156 FALSE FALSE FALSE FALSE FALSE
## 157 FALSE FALSE FALSE FALSE FALSE
## 158 FALSE FALSE FALSE FALSE FALSE
## 159 FALSE FALSE FALSE FALSE FALSE
## 160 FALSE FALSE FALSE FALSE FALSE
## 161 FALSE FALSE FALSE FALSE FALSE
## 162 FALSE FALSE FALSE FALSE FALSE
## 163 FALSE FALSE FALSE FALSE FALSE
## 164 FALSE FALSE FALSE FALSE FALSE
## 165 FALSE FALSE FALSE FALSE FALSE
## 166 FALSE FALSE FALSE FALSE FALSE
# Wrap the training split in an mlr classification task, predicting the
# "diabetes" column; the outer parentheses print the task summary.
(dt_task <- makeClassifTask(target = "diabetes", data = train))
## Supervised task: train
## Type: classif
## Target: diabetes
## Observations: 614
## Features:
## numerics factors ordered functionals
## 8 0 0 0
## Missings: FALSE
## Has weights: FALSE
## Has blocking: FALSE
## Has coordinates: FALSE
## Classes: 2
## neg pos
## 386 228
## Positive class: neg
# Gradient boosting machine learner that predicts class probabilities
# rather than hard labels; the outer parentheses print its description.
(dt_prob <- makeLearner("classif.gbm", predict.type = "prob"))
## Learner classif.gbm from package gbm
## Type: classif
## Name: Gradient Boosting Machine; Short name: gbm
## Class: classif.gbm
## Properties: twoclass,multiclass,missings,numerics,factors,prob,weights,featimp
## Predict-Type: prob
## Hyperparameters: keep.data=FALSE
# FSelector supplies the entropy-based filter methods (information gain,
# gain ratio, chi-squared, ...) referenced by name below.
library(FSelector)
# List every feature-filter method registered with mlr.
listFilterMethods()
## id package
## 1 anova.test
## 2 auc
## 3 carscore care
## 4 FSelector_chi.squared FSelector
## 5 FSelector_gain.ratio FSelector
## 6 FSelector_information.gain FSelector
## 7 FSelector_oneR FSelector
## 8 FSelector_relief FSelector
## 9 FSelector_symmetrical.uncertainty FSelector
## 10 FSelectorRcpp_gain.ratio FSelectorRcpp
## 11 FSelectorRcpp_information.gain FSelectorRcpp
## 12 FSelectorRcpp_relief FSelectorRcpp
## 13 FSelectorRcpp_symmetrical.uncertainty FSelectorRcpp
## 14 kruskal.test
## 15 linear.correlation
## 16 mrmr mRMRe
## 17 party_cforest.importance party
## 18 permutation.importance
## 19 praznik_CMIM praznik
## 20 praznik_DISR praznik
## 21 praznik_JMI praznik
## 22 praznik_JMIM praznik
## 23 praznik_MIM praznik
## 24 praznik_MRMR praznik
## 25 praznik_NJMIM praznik
## 26 randomForest_importance randomForest
## 27 randomForestSRC_importance randomForestSRC
## 28 randomForestSRC_var.select randomForestSRC
## 29 ranger_impurity ranger
## 30 ranger_permutation ranger
## 31 rank.correlation
## 32 univariate.model.score
## 33 variance
## desc
## 1 ANOVA Test for binary and multiclass ...
## 2 AUC filter for binary classification ...
## 3 CAR scores
## 4 Chi-squared statistic of independence...
## 5 Chi-squared statistic of independence...
## 6 Entropy-based information gain betwee...
## 7 oneR association rule
## 8 RELIEF algorithm
## 9 Entropy-based symmetrical uncertainty...
## 10 Entropy-based Filters: Algorithms tha...
## 11 Entropy-based Filters: Algorithms tha...
## 12 RELIEF algorithm
## 13 Entropy-based Filters: Algorithms tha...
## 14 Kruskal Test for binary and multiclas...
## 15 Pearson correlation between feature a...
## 16 Minimum redundancy, maximum relevance...
## 17 Permutation importance of random fore...
## 18 Aggregated difference between feature...
## 19 Minimal conditional mutual informatio...
## 20 Double input symmetrical relevance fi...
## 21 Joint mutual information filter
## 22 Minimal joint mutual information maxi...
## 23 conditional mutual information based ...
## 24 Minimum redundancy maximal relevancy ...
## 25 Minimal normalised joint mutual infor...
## 26 Importance based on OOB-accuracy or n...
## 27 Importance of random forests fitted i...
## 28 Minimal depth of / variable hunting v...
## 29 Variable importance based on ranger i...
## 30 Variable importance based on ranger p...
## 31 Spearman's correlation between featur...
## 32 Resamples an mlr learner for each inp...
## 33 A simple variance filter
# Ensemble filters combine the rankings of several base filter methods
# (Borda sum, max, mean, median, min across filters).
listFilterEnsembleMethods()
## id
## 1 E-Borda
## 2 E-max
## 3 E-mean
## 4 E-median
## 5 E-min
## desc
## 1 Borda ensemble filter. Takes the sum across all base filter methods for each feature.
## 2 Maximum ensemble filter. Takes the best maximum value across all base filter methods for each feature.
## 3 Mean ensemble filter. Takes the mean across all base filter methods for each feature.
## 4 Median ensemble filter. Takes the median across all base filter methods for each feature.
## 5 Minimum ensemble filter. Takes the best minimum value across all base filter methods for each feature.
# Score every feature of the task by information gain, then plot the
# ranking with mlr's built-in bar chart.
filter_vals <- generateFilterValuesData(
  dt_task,
  method = "FSelector_information.gain"
)
plotFilterValues(filter_vals) +
  theme_bw() +
  labs(
    x = "feature",
    y = "information gain",
    title = "Information gain of features in GBM",
    caption = "Source: Pima Indians Diabetes Database"
  )
# Permutation feature importance on the training task, scored with the
# true positive rate; interaction = FALSE shuffles one feature at a time.
feat_imp_tpr <- generateFeatureImportanceData(task = dt_task,
learner = dt_prob,
measure = tpr,
interaction = FALSE)
## Distribution not specified, assuming bernoulli ...
# Reshape the one-row importance result to long format (one row per
# feature) and plot features ordered by importance.
# `gather()` is superseded in tidyr; use `pivot_longer()` instead, and
# `geom_col()` instead of `geom_bar(stat = "identity")`.
feat_imp_tpr$res %>%
  pivot_longer(everything(), names_to = "key", values_to = "value") %>%
  ggplot(aes(x = reorder(key, value), y = value)) +
  geom_col() +
  labs(x = "feature",
       title = "True positive rate of features in GBM",
       subtitle = "calculated with permutation importance",
       caption = "Source: Pima Indians Diabetes Database")
# Same permutation importance, this time scored with the area under the
# ROC curve instead of the true positive rate.
feat_imp_auc <- generateFeatureImportanceData(task = dt_task,
learner = dt_prob,
measure = auc,
interaction = FALSE)
## Distribution not specified, assuming bernoulli ...
# Long-format reshape of the AUC-based importances, plotted ordered by
# value. `gather()` is superseded in tidyr; use `pivot_longer()`, and
# `geom_col()` instead of `geom_bar(stat = "identity")`.
feat_imp_auc$res %>%
  pivot_longer(everything(), names_to = "key", values_to = "value") %>%
  ggplot(aes(x = reorder(key, value), y = value)) +
  geom_col() +
  labs(x = "feature",
       title = "Area under the curve of features in GBM",
       subtitle = "calculated with permutation importance",
       caption = "Source: Pima Indians Diabetes Database")
# Drop the three least informative features (per the filter/importance
# analysis above) from both partitions, then compare their summaries.
set.seed(1000)
train <- train %>% dplyr::select(-c(pedigree, pressure, triceps))
test <- test %>% dplyr::select(-c(pedigree, pressure, triceps))
list(train = summary(train), test = summary(test))
## $train
## pregnant glucose insulin mass
## Min. : 0.000 Min. : 0.0 Min. : 0.00 Min. : 0.00
## 1st Qu.: 1.000 1st Qu.:100.0 1st Qu.: 0.00 1st Qu.:27.10
## Median : 3.000 Median :119.0 Median : 36.50 Median :32.00
## Mean : 3.894 Mean :123.1 Mean : 81.65 Mean :31.92
## 3rd Qu.: 6.000 3rd Qu.:143.0 3rd Qu.:131.50 3rd Qu.:36.38
## Max. :17.000 Max. :199.0 Max. :846.00 Max. :59.40
## age diabetes
## Min. :21.00 neg:386
## 1st Qu.:24.00 pos:228
## Median :29.00
## Mean :33.42
## 3rd Qu.:41.00
## Max. :81.00
##
## $test
## pregnant glucose insulin mass
## Min. : 0.000 Min. : 0.0 Min. : 0.0 Min. : 0.00
## 1st Qu.: 1.000 1st Qu.: 93.0 1st Qu.: 0.0 1st Qu.:27.80
## Median : 2.000 Median :108.0 Median : 20.5 Median :32.40
## Mean : 3.649 Mean :112.3 Mean : 72.4 Mean :32.29
## 3rd Qu.: 6.000 3rd Qu.:133.8 3rd Qu.:100.0 3rd Qu.:36.88
## Max. :14.000 Max. :197.0 Max. :744.0 Max. :67.10
## age diabetes
## Min. :21.00 neg:114
## 1st Qu.:23.25 pos: 40
## Median :29.00
## Mean :32.54
## 3rd Qu.:39.75
## Max. :67.00
# Wrap the reduced training set in an mlr classification task; the printed
# summary shows mlr takes "neg" as the positive class (first factor level).
(dt_task <- makeClassifTask(data = train, target = "diabetes"))
## Supervised task: train
## Type: classif
## Target: diabetes
## Observations: 614
## Features:
## numerics factors ordered functionals
## 5 0 0 0
## Missings: FALSE
## Has weights: FALSE
## Has blocking: FALSE
## Has coordinates: FALSE
## Classes: 2
## neg pos
## 386 228
## Positive class: neg
# Inspect the tunable hyperparameters (types, defaults, ranges) of the
# GBM learner before defining a search space.
getParamSet("classif.gbm")
## Type len Def
## distribution discrete - bernoulli
## n.trees integer - 100
## cv.folds integer - 0
## interaction.depth integer - 1
## n.minobsinnode integer - 10
## shrinkage numeric - 0.1
## bag.fraction numeric - 0.5
## train.fraction numeric - 1
## keep.data logical - TRUE
## verbose logical - FALSE
## n.cores integer - 1
## Constr Req Tunable Trafo
## distribution gaussian,bernoulli,huberized,adaboost... - TRUE -
## n.trees 1 to Inf - TRUE -
## cv.folds -Inf to Inf - TRUE -
## interaction.depth 1 to Inf - TRUE -
## n.minobsinnode 1 to Inf - TRUE -
## shrinkage 0 to Inf - TRUE -
## bag.fraction 0 to 1 - TRUE -
## train.fraction 0 to 1 - TRUE -
## keep.data - - FALSE -
## verbose - - FALSE -
## n.cores -Inf to Inf - FALSE -
# Search space for tuning: number of boosting iterations and learning rate.
dt_param <- makeParamSet(
  makeIntegerParam("n.trees", lower = 20, upper = 150),
  makeNumericParam("shrinkage", lower = 0.01, upper = 0.1))
# Exhaustive grid search over the parameter set. Use `<-` (not `=`) for
# top-level assignment, consistent with the rest of the file.
ctrl <- makeTuneControlGrid()
# 3-fold cross-validation, stratified so each fold keeps the class ratio.
rdesc <- makeResampleDesc("CV",
                          iters = 3L,
                          stratify = TRUE)
set.seed(1000)
# Grid-search tuning of the GBM, evaluated with stratified 3-fold CV.
# tpr is listed first, so it drives the optimisation; the other measures
# are reported alongside. setAggregation(tpr, test.sd) additionally
# reports the fold-to-fold standard deviation of the tpr.
(dt_tuneparam <- tuneParams(learner = dt_prob,
resampling = rdesc,
measures = list(tpr,auc, fnr, mmce, tnr, setAggregation(tpr, test.sd)),
par.set = dt_param,
control = ctrl,
task = dt_task,
show.info = TRUE))
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Distribution not specified, assuming bernoulli ...
## Tune result:
## Op. pars: n.trees=20; shrinkage=0.02
## tpr.test.mean=1.0000000,auc.test.mean=0.7878691,fnr.test.mean=0.0000000,mmce.test.mean=0.3713375,tnr.test.mean=0.0000000,tpr.test.sd=0.0000000
# Collect the tuning trace for visualisation. Use `<-` (not `=`) for
# top-level assignment. NOTE(review): the name `data` shadows utils::data()
# in the global environment; kept unchanged in case later code refers to it.
data <- generateHyperParsEffectData(dt_tuneparam,
                                    partial.dep = TRUE)
# With partial.dep = TRUE, a surrogate regression learner is required to
# marginalise over the hyperparameters not shown on the axes.
plotHyperParsEffect(data, x = "n.trees", y = "tpr.test.mean", partial.dep.learn = makeLearner("regr.gbm"))
plotHyperParsEffect(data, x = "shrinkage", y = "tpr.test.mean", partial.dep.learn = makeLearner("regr.gbm"))
# Joint effect of both hyperparameters as a heatmap.
plotHyperParsEffect(data,
                    x = "n.trees",
                    y = "shrinkage",
                    z = "tpr.test.mean",
                    plot.type = "heatmap",
                    partial.dep.learn = makeLearner("regr.gbm")) +
  theme_bw() +
  labs(title = "Hyperparameter effects data",
       subtitle = "of GBM model with reduced feature set",
       caption = "Source: Pima Indians Diabetes Database")
# Report the winning hyperparameter combination and its aggregated CV metrics.
list( `Optimal HyperParameters` = dt_tuneparam$x,
`Optimal Metrics` = dt_tuneparam$y )
## $`Optimal HyperParameters`
## $`Optimal HyperParameters`$n.trees
## [1] 20
##
## $`Optimal HyperParameters`$shrinkage
## [1] 0.02
##
##
## $`Optimal Metrics`
## tpr.test.mean auc.test.mean fnr.test.mean mmce.test.mean tnr.test.mean
## 1.0000000 0.7878691 0.0000000 0.3713375 0.0000000
## tpr.test.sd
## 0.0000000
# Fix the tuned hyperparameters on the learner and refit on the full
# training task. (The call resolves to mlr's train() function even though
# a data frame named `train` exists, because R looks up functions separately.)
gbm_final <- setHyperPars(dt_prob, par.vals = dt_tuneparam$x)
set.seed(1000)
gbm_final_train <- train(learner = gbm_final, task = dt_task)
## Distribution not specified, assuming bernoulli ...
# Pull the underlying gbm model out of the mlr wrapper for inspection.
getLearnerModel(gbm_final_train)
## gbm::gbm(formula = f, data = d, n.trees = 20L, shrinkage = 0.02,
## keep.data = FALSE)
## A gradient boosted model with bernoulli loss function.
## 20 iterations were performed.
## There were 5 predictors of which 3 had non-zero influence.
# rpart & rpart.plot -- the original line fused a section heading into the
# code (`rpart & rpart.plot)library(rpart)`), which is a syntax error.
library(rpart)
library(rpart.plot)
# Fit a single classification tree on the reduced training set for an
# interpretable view of the decision boundaries.
rpart_tree <- rpart(diabetes ~ .,
                    data = train,
                    method = "class")
# roundint = FALSE suppresses rpart.plot's integer-rounding heuristic;
# type = 3 draws split labels on the branches.
rpart.plot(rpart_tree,
           roundint = FALSE,
           type = 3,
           clip.right.labs = FALSE)
# Print the tree as human-readable rules with class probabilities.
rpart.rules(rpart_tree, roundint = FALSE)
## diabetes
## 0.05 when glucose < 128 & mass < 27 & age >= 29
## 0.10 when glucose < 128 & age < 29
## 0.17 when glucose is 128 to 146 & mass < 30
## 0.25 when glucose >= 146 & mass < 30 & age < 29
## 0.28 when glucose < 128 & mass >= 29 & age >= 29 & insulin < 143
## 0.38 when glucose is 128 to 158 & mass is 32 to 42 & age < 43
## 0.62 when glucose >= 146 & mass < 30 & age >= 29
## 0.63 when glucose < 128 & mass is 27 to 29 & age >= 29 & insulin < 143
## 0.77 when glucose < 128 & mass >= 27 & age >= 29 & insulin >= 143
## 0.82 when glucose is 128 to 158 & mass >= 42 & age < 43
## 0.86 when glucose is 128 to 158 & mass >= 30 & age >= 43
## 0.86 when glucose >= 158 & mass >= 30
## 0.88 when glucose is 128 to 158 & mass is 30 to 32 & age < 43
set.seed(1000)
# Predict class probabilities on the held-out test set (default 0.5 threshold).
(gbm_final_predict <- predict(gbm_final_train, newdata = test))
## Prediction: 154 observations
## predict.type: prob
## threshold: neg=0.50,pos=0.50
## time: 0.00
## truth prob.pos prob.neg response
## 12 pos 0.4807717 0.5192283 neg
## 18 pos 0.3229851 0.6770149 neg
## 19 neg 0.3229851 0.6770149 neg
## 20 pos 0.3300235 0.6699765 neg
## 34 neg 0.3091184 0.6908816 neg
## 38 pos 0.3229851 0.6770149 neg
## ... (#rows: 154, #cols: 4)
# Confusion matrix and ROC measures at the default 0.5 threshold; the
# printed matrix shows the model predicts "neg" for every test case
# (tpr = 1, tnr = 0), motivating the threshold adjustment below.
gbm_final_predict %>% calculateROCMeasures()
## predicted
## true neg pos
## neg 114 0 tpr: 1 fnr: 0
## pos 40 0 fpr: 1 tnr: 0
## ppv: 0.74 for: NaN lrp: 1 acc: 0.74
## fdr: 0.26 npv: NaN lrm: NaN dor: NaN
##
##
## Abbreviations:
## tpr - True positive rate (Sensitivity, Recall)
## fpr - False positive rate (Fall-out)
## fnr - False negative rate (Miss rate)
## tnr - True negative rate (Specificity)
## ppv - Positive predictive value (Precision)
## for - False omission rate
## lrp - Positive likelihood ratio (LR+)
## fdr - False discovery rate
## npv - Negative predictive value
## acc - Accuracy
## lrm - Negative likelihood ratio (LR-)
## dor - Diagnostic odds ratio
# Collect headline performance measures into a labelled one-column data frame.
model_performance <- performance(gbm_final_predict,
measures = list(tpr, auc, mmce, acc, tnr)) %>%
as.data.frame(row.names = c("True Positive Rate","Area Under Curve", "Mean Misclassification Error","Accuracy","True Negative Rate"))
model_performance
## .
## True Positive Rate 1.0000000
## Area Under Curve 0.7695175
## Mean Misclassification Error 0.2597403
## Accuracy 0.7402597
## True Negative Rate 0.0000000
# Evaluate the chosen measures over a grid of classification thresholds.
gbm_final_threshold <- generateThreshVsPerfData(gbm_final_predict,
measures = list(tpr, auc, mmce, tnr))
# ROC curve built from the per-threshold tpr/fpr pairs.
gbm_final_threshold %>%
plotROCCurves() +
geom_point() +
theme_bw() +
labs(title = "ROC curve from predictions",
subtitle = "of GBM model with reduced feature set",
caption = "Source: Pima Indians Diabetes Database")
# One panel per measure as a function of the threshold, to pick a
# better operating point than the default 0.5.
gbm_final_threshold %>%
plotThreshVsPerf() +
geom_point() +
theme_bw() +
labs(title = "Threshold vs. performance",
subtitle = "for 2-class classification of GBM model with reduced feature set",
caption = "Source: Pima Indians Diabetes Database")
# Raw threshold table backing the plots above.
gbm_final_threshold$data
## tpr auc mmce tnr threshold
## 1 1.0000000 0.7695175 0.2597403 0.000 0.00000000
## 2 1.0000000 0.7695175 0.2597403 0.000 0.01010101
## 3 1.0000000 0.7695175 0.2597403 0.000 0.02020202
## 4 1.0000000 0.7695175 0.2597403 0.000 0.03030303
## 5 1.0000000 0.7695175 0.2597403 0.000 0.04040404
## 6 1.0000000 0.7695175 0.2597403 0.000 0.05050505
## 7 1.0000000 0.7695175 0.2597403 0.000 0.06060606
## 8 1.0000000 0.7695175 0.2597403 0.000 0.07070707
## 9 1.0000000 0.7695175 0.2597403 0.000 0.08080808
## 10 1.0000000 0.7695175 0.2597403 0.000 0.09090909
## 11 1.0000000 0.7695175 0.2597403 0.000 0.10101010
## 12 1.0000000 0.7695175 0.2597403 0.000 0.11111111
## 13 1.0000000 0.7695175 0.2597403 0.000 0.12121212
## 14 1.0000000 0.7695175 0.2597403 0.000 0.13131313
## 15 1.0000000 0.7695175 0.2597403 0.000 0.14141414
## 16 1.0000000 0.7695175 0.2597403 0.000 0.15151515
## 17 1.0000000 0.7695175 0.2597403 0.000 0.16161616
## 18 1.0000000 0.7695175 0.2597403 0.000 0.17171717
## 19 1.0000000 0.7695175 0.2597403 0.000 0.18181818
## 20 1.0000000 0.7695175 0.2597403 0.000 0.19191919
## 21 1.0000000 0.7695175 0.2597403 0.000 0.20202020
## 22 1.0000000 0.7695175 0.2597403 0.000 0.21212121
## 23 1.0000000 0.7695175 0.2597403 0.000 0.22222222
## 24 1.0000000 0.7695175 0.2597403 0.000 0.23232323
## 25 1.0000000 0.7695175 0.2597403 0.000 0.24242424
## 26 1.0000000 0.7695175 0.2597403 0.000 0.25252525
## 27 1.0000000 0.7695175 0.2597403 0.000 0.26262626
## 28 1.0000000 0.7695175 0.2597403 0.000 0.27272727
## 29 1.0000000 0.7695175 0.2597403 0.000 0.28282828
## 30 1.0000000 0.7695175 0.2597403 0.000 0.29292929
## 31 1.0000000 0.7695175 0.2597403 0.000 0.30303030
## 32 1.0000000 0.7695175 0.2597403 0.000 0.31313131
## 33 1.0000000 0.7695175 0.2597403 0.000 0.32323232
## 34 1.0000000 0.7695175 0.2597403 0.000 0.33333333
## 35 1.0000000 0.7695175 0.2597403 0.000 0.34343434
## 36 1.0000000 0.7695175 0.2597403 0.000 0.35353535
## 37 1.0000000 0.7695175 0.2597403 0.000 0.36363636
## 38 1.0000000 0.7695175 0.2597403 0.000 0.37373737
## 39 1.0000000 0.7695175 0.2597403 0.000 0.38383838
## 40 1.0000000 0.7695175 0.2597403 0.000 0.39393939
## 41 1.0000000 0.7695175 0.2597403 0.000 0.40404040
## 42 1.0000000 0.7695175 0.2597403 0.000 0.41414141
## 43 1.0000000 0.7695175 0.2597403 0.000 0.42424242
## 44 1.0000000 0.7695175 0.2597403 0.000 0.43434343
## 45 1.0000000 0.7695175 0.2597403 0.000 0.44444444
## 46 1.0000000 0.7695175 0.2597403 0.000 0.45454545
## 47 1.0000000 0.7695175 0.2597403 0.000 0.46464646
## 48 1.0000000 0.7695175 0.2597403 0.000 0.47474747
## 49 1.0000000 0.7695175 0.2597403 0.000 0.48484848
## 50 1.0000000 0.7695175 0.2597403 0.000 0.49494949
## 51 1.0000000 0.7695175 0.2597403 0.000 0.50505051
## 52 1.0000000 0.7695175 0.2597403 0.000 0.51515152
## 53 0.9912281 0.7695175 0.2142857 0.200 0.52525253
## 54 0.9824561 0.7695175 0.2012987 0.275 0.53535354
## 55 0.9736842 0.7695175 0.2077922 0.275 0.54545455
## 56 0.9298246 0.7695175 0.2207792 0.350 0.55555556
## 57 0.9210526 0.7695175 0.2207792 0.375 0.56565657
## 58 0.8771930 0.7695175 0.2467532 0.400 0.57575758
## 59 0.8157895 0.7695175 0.2792208 0.450 0.58585859
## 60 0.8070175 0.7695175 0.2727273 0.500 0.59595960
## 61 0.7807018 0.7695175 0.2857143 0.525 0.60606061
## 62 0.7807018 0.7695175 0.2857143 0.525 0.61616162
## 63 0.7807018 0.7695175 0.2857143 0.525 0.62626263
## 64 0.7807018 0.7695175 0.2857143 0.525 0.63636364
## 65 0.7456140 0.7695175 0.3051948 0.550 0.64646465
## 66 0.7456140 0.7695175 0.3051948 0.550 0.65656566
## 67 0.7280702 0.7695175 0.3116883 0.575 0.66666667
## 68 0.6491228 0.7695175 0.3311688 0.725 0.67676768
## 69 0.1666667 0.7695175 0.6168831 1.000 0.68686869
## 70 0.0000000 0.7695175 0.7402597 1.000 0.69696970
## 71 0.0000000 0.7695175 0.7402597 1.000 0.70707071
## 72 0.0000000 0.7695175 0.7402597 1.000 0.71717172
## 73 0.0000000 0.7695175 0.7402597 1.000 0.72727273
## 74 0.0000000 0.7695175 0.7402597 1.000 0.73737374
## 75 0.0000000 0.7695175 0.7402597 1.000 0.74747475
## 76 0.0000000 0.7695175 0.7402597 1.000 0.75757576
## 77 0.0000000 0.7695175 0.7402597 1.000 0.76767677
## 78 0.0000000 0.7695175 0.7402597 1.000 0.77777778
## 79 0.0000000 0.7695175 0.7402597 1.000 0.78787879
## 80 0.0000000 0.7695175 0.7402597 1.000 0.79797980
## 81 0.0000000 0.7695175 0.7402597 1.000 0.80808081
## 82 0.0000000 0.7695175 0.7402597 1.000 0.81818182
## 83 0.0000000 0.7695175 0.7402597 1.000 0.82828283
## 84 0.0000000 0.7695175 0.7402597 1.000 0.83838384
## 85 0.0000000 0.7695175 0.7402597 1.000 0.84848485
## 86 0.0000000 0.7695175 0.7402597 1.000 0.85858586
## 87 0.0000000 0.7695175 0.7402597 1.000 0.86868687
## 88 0.0000000 0.7695175 0.7402597 1.000 0.87878788
## 89 0.0000000 0.7695175 0.7402597 1.000 0.88888889
## 90 0.0000000 0.7695175 0.7402597 1.000 0.89898990
## 91 0.0000000 0.7695175 0.7402597 1.000 0.90909091
## 92 0.0000000 0.7695175 0.7402597 1.000 0.91919192
## 93 0.0000000 0.7695175 0.7402597 1.000 0.92929293
## 94 0.0000000 0.7695175 0.7402597 1.000 0.93939394
## 95 0.0000000 0.7695175 0.7402597 1.000 0.94949495
## 96 0.0000000 0.7695175 0.7402597 1.000 0.95959596
## 97 0.0000000 0.7695175 0.7402597 1.000 0.96969697
## 98 0.0000000 0.7695175 0.7402597 1.000 0.97979798
## 99 0.0000000 0.7695175 0.7402597 1.000 0.98989899
## 100 0.0000000 0.7695175 0.7402597 1.000 1.00000000
# Re-threshold the probability predictions. The value 0.59595960 is read
# off the threshold-vs-performance table above, trading some tpr for a
# non-zero tnr compared with the default 0.5.
gbm_final_thr <- gbm_final_predict %>%
setThreshold(0.59595960)
# Performance at the adjusted threshold (auc is threshold-independent).
(dt_performance <- gbm_final_thr %>% performance(measures = list(tpr, auc, mmce, tnr)) )
# Confusion matrix and derived ROC measures at the adjusted threshold.
(dt_cm <- gbm_final_thr %>% calculateROCMeasures() )
## predicted
## true neg pos
## neg 92 22 tpr: 0.81 fnr: 0.19
## pos 20 20 fpr: 0.5 tnr: 0.5
## ppv: 0.82 for: 0.52 lrp: 1.61 acc: 0.73
## fdr: 0.18 npv: 0.48 lrm: 0.39 dor: 4.18
##
##
## Abbreviations:
## tpr - True positive rate (Sensitivity, Recall)
## fpr - False positive rate (Fall-out)
## fnr - False negative rate (Miss rate)
## tnr - True negative rate (Specificity)
## ppv - Positive predictive value (Precision)
## for - False omission rate
## lrp - Positive likelihood ratio (LR+)
## fdr - False discovery rate
## npv - Negative predictive value
## acc - Accuracy
## lrm - Negative likelihood ratio (LR-)
## dor - Diagnostic odds ratio
# Labelled performance table at the adjusted threshold, for comparison
# with model_performance computed at the default 0.5 threshold.
performance_threshold <- performance(gbm_final_thr, measures = list(tpr, auc, mmce, acc, tnr)) %>%
as.data.frame(row.names = c("True Positive Rate", "Area Under Curve", "Mean Misclassification Error", "Accuracy", "True Negative Rate"))
performance_threshold
## .
## True Positive Rate 0.8070175
## Area Under Curve 0.7695175
## Mean Misclassification Error 0.2727273
## Accuracy 0.7272727
## True Negative Rate 0.5000000
#remotes::install_github("grantmcdermott/parttree")
library(parsnip)
library(parttree)
set.seed(123) ## For consistent jitter
## Build our tree using parsnip (but with rpart as the model engine).
## Only two predictors (glucose + mass) are used so the partitions can be
## drawn in a 2D plot.
## Note: use `<-` for assignment, consistent with the rest of this document
## and the tidyverse style guide.
ti_tree <-
  decision_tree() %>%
  set_engine("rpart") %>%
  set_mode("classification") %>%
  fit(diabetes ~ glucose + mass, data = PimaIndiansDiabetes)
## Plot the data and model partitions: jittered points colored by class,
## with the tree's rectangular decision regions overlaid via geom_parttree().
PimaIndiansDiabetes %>%
  ggplot(aes(x = glucose, y = mass)) +
  geom_jitter(aes(col = diabetes), alpha = 0.7) +
  geom_parttree(data = ti_tree, aes(fill = diabetes), alpha = 0.1) +
  theme_bw() +
  labs(title = "Decision boundaries",
       subtitle = "for 2-class classification of RPART model (glucose + mass)",
       caption = "Source: Pima Indians Diabetes Database")
Li et al., Visualizing the Loss Landscape of Neural Nets, 2018
http://cs231n.stanford.edu/slides/2018/cs231n_2018_lecture13.pdf
https://projector.tensorflow.org/
https://shirinsplayground.netlify.app/2020/10/keras_workshop_user20/
The Unreasonable Effectiveness of Recurrent Neural Networks; Karpathy, 2015
Seq2Seq-Vis: Visual Debugging Tool for Sequence-to-Sequence Models; Strobelt, 2018
Visualizing and Understanding Convolutional Networks; Zeiler & Fergus, 2013
The Building Blocks of Interpretability; Olah, Satyanarayan, Johnson, Carter, Schubert, Ye, Mordvintsev
playground.tensorflow.org
Distill.pub
research.google.com/bigpicture/attacking-discrimination-in-ml
Google Creative Lab: https://quickdraw.withgoogle.com/
https://poloclub.github.io/ganlab/
# Record the full session state (R version, OS, attached packages and their
# versions/sources) for reproducibility of this analysis.
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.4 (2021-02-15)
## os macOS Big Sur 10.16
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/Berlin
## date 2021-04-11
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date lib
## assertthat 0.2.1 2019-03-21 [2]
## backports 1.2.1 2020-12-09 [2]
## BBmisc 1.11 2017-03-10 [2]
## broom 0.7.5 2021-02-19 [2]
## bslib 0.2.4 2021-01-25 [2]
## cachem 1.0.4 2021-02-13 [2]
## callr 3.5.1 2020-10-13 [2]
## cellranger 1.1.0 2016-07-27 [2]
## checkmate 2.0.0 2020-02-06 [2]
## cli 2.3.1 2021-02-23 [2]
## colorspace 2.0-0 2020-11-11 [2]
## crayon 1.4.1 2021-02-08 [2]
## data.table 1.14.0 2021-02-21 [2]
## DBI 1.1.1 2021-01-15 [2]
## dbplyr 2.1.0 2021-02-03 [2]
## desc 1.3.0 2021-03-05 [2]
## devtools 2.3.2 2020-09-18 [2]
## digest 0.6.27 2020-10-24 [2]
## dplyr * 1.0.5 2021-03-05 [2]
## ellipsis 0.3.1 2020-05-15 [2]
## entropy 1.2.1 2014-11-14 [1]
## evaluate 0.14 2019-05-28 [2]
## fansi 0.4.2 2021-01-15 [2]
## farver 2.1.0 2021-02-28 [2]
## fastmap 1.1.0 2021-01-25 [2]
## fastmatch 1.1-0 2017-01-28 [2]
## forcats * 0.5.1 2021-01-27 [2]
## fs 1.5.0 2020-07-31 [2]
## FSelector * 0.33 2021-02-16 [1]
## gbm 2.1.8 2020-07-15 [2]
## generics 0.1.0 2020-10-31 [2]
## GGally * 2.1.1 2021-03-08 [1]
## ggfortify * 0.4.11 2020-10-02 [2]
## ggplot2 * 3.3.3 2020-12-30 [2]
## glue 1.4.2 2020-08-27 [2]
## gridExtra 2.3 2017-09-09 [2]
## gtable 0.3.0 2019-03-25 [2]
## haven 2.3.1 2020-06-01 [2]
## highr 0.8 2019-03-20 [2]
## hms 1.0.0 2021-01-13 [2]
## htmltools 0.5.1.1 2021-01-22 [2]
## httr 1.4.2 2020-07-20 [2]
## jquerylib 0.1.3 2020-12-17 [2]
## jsonlite 1.7.2 2020-12-09 [2]
## knitr 1.31 2021-01-27 [2]
## labeling 0.4.2 2020-10-20 [2]
## lattice 0.20-41 2020-04-02 [2]
## lifecycle 1.0.0 2021-02-15 [2]
## lubridate 1.7.10 2021-02-26 [2]
## magrittr 2.0.1 2020-11-17 [2]
## MASS * 7.3-53.1 2021-02-12 [2]
## Matrix 1.3-2 2021-01-06 [2]
## memoise 2.0.0 2021-01-26 [2]
## mlbench * 2.1-3 2021-01-29 [1]
## mlr * 2.19.0 2021-02-22 [2]
## mmpf * 0.0.5 2018-10-24 [2]
## modelr 0.1.8 2020-05-19 [2]
## munsell 0.5.0 2018-06-12 [2]
## parallelMap 1.5.0 2020-03-26 [2]
## ParamHelpers * 1.14 2020-03-24 [2]
## parsnip * 0.1.5 2021-01-19 [2]
## parttree * 0.0.1.9000 2021-03-14 [1]
## pillar 1.5.1 2021-03-05 [2]
## pkgbuild 1.2.0 2020-12-15 [2]
## pkgconfig 2.0.3 2019-09-22 [2]
## pkgload 1.2.0 2021-02-23 [2]
## plyr 1.8.6 2020-03-03 [2]
## prettyunits 1.1.1 2020-01-24 [2]
## processx 3.4.5 2020-11-30 [2]
## ps 1.6.0 2021-02-28 [2]
## purrr * 0.3.4 2020-04-17 [2]
## R6 2.5.0 2020-10-28 [2]
## randomForest 4.6-14 2018-03-25 [2]
## RColorBrewer 1.1-2 2014-12-07 [2]
## Rcpp 1.0.6 2021-01-15 [2]
## readr * 1.4.0 2020-10-05 [2]
## readxl 1.3.1 2019-03-13 [1]
## remotes 2.2.0 2020-07-21 [2]
## reprex 1.0.0 2021-01-27 [2]
## reshape 0.8.8 2018-10-23 [1]
## rJava * 0.9-13 2020-07-06 [2]
## rlang 0.4.10 2020-12-30 [2]
## rmarkdown 2.7 2021-02-19 [2]
## rpart * 4.1-15 2019-04-12 [2]
## rpart.plot * 3.0.9 2020-09-17 [1]
## rprojroot 2.0.2 2020-11-15 [2]
## rstudioapi 0.13 2020-11-12 [2]
## rvest 1.0.0 2021-03-09 [2]
## RWeka 0.4-43 2020-08-23 [1]
## RWekajars 3.9.3-2 2019-10-19 [1]
## sass 0.3.1 2021-01-24 [2]
## scagnostics * 0.2-4.1 2018-04-04 [1]
## scales 1.1.1 2020-05-11 [2]
## sessioninfo 1.1.1 2018-11-05 [2]
## stringi 1.5.3 2020-09-09 [2]
## stringr * 1.4.0 2019-02-10 [2]
## survival 3.2-7 2020-09-28 [2]
## testthat 3.0.2 2021-02-14 [2]
## tibble * 3.1.0 2021-02-25 [2]
## tidyr * 1.1.3 2021-03-03 [2]
## tidyselect 1.1.0 2020-05-11 [2]
## tidyverse * 1.3.0 2019-11-21 [2]
## usethis 2.0.1 2021-02-10 [2]
## utf8 1.2.1 2021-03-12 [2]
## vctrs 0.3.6 2020-12-17 [2]
## withr 2.4.1 2021-01-26 [2]
## xfun 0.22 2021-03-11 [2]
## XML 3.99-0.5 2020-07-23 [2]
## xml2 1.3.2 2020-04-23 [2]
## yaml 2.2.1 2020-02-01 [2]
## source
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.1)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## Github (grantmcdermott/parttree@9d25d2c)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.4)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.2)
## CRAN (R 4.0.4)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.2)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
##
## [1] /Users/shiringlander/Library/R/4.0/library
## [2] /Library/Frameworks/R.framework/Versions/4.0/Resources/library